From 548cb517854bdfc2a13084a27f1547f6d1734361 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Thu, 30 Mar 2023 19:48:55 +0200 Subject: [PATCH 001/132] First Arcanization no refcount, no lifetime --- player/src/bin/play.rs | 4 +- player/src/lib.rs | 22 +- player/tests/test.rs | 6 +- wgpu-core/LICENSE.APACHE | 177 +- wgpu-core/LICENSE.MIT | 22 +- wgpu-core/src/binding_model.rs | 115 +- wgpu-core/src/command/bind.rs | 13 +- wgpu-core/src/command/bundle.rs | 90 +- wgpu-core/src/command/clear.rs | 81 +- wgpu-core/src/command/compute.rs | 113 +- wgpu-core/src/command/memory_init.rs | 38 +- wgpu-core/src/command/mod.rs | 260 +- wgpu-core/src/command/query.rs | 78 +- wgpu-core/src/command/render.rs | 257 +- wgpu-core/src/command/transfer.rs | 250 +- wgpu-core/src/device/device.rs | 3097 ++++++++++++++ wgpu-core/src/device/global.rs | 2557 +++++++++++ wgpu-core/src/device/life.rs | 498 ++- wgpu-core/src/device/mod.rs | 5850 +------------------------- wgpu-core/src/device/queue.rs | 616 +-- wgpu-core/src/error.rs | 7 +- wgpu-core/src/global.rs | 168 + wgpu-core/src/hal_api.rs | 138 + wgpu-core/src/hub.rs | 1210 +----- wgpu-core/src/identity.rs | 183 + wgpu-core/src/instance.rs | 196 +- wgpu-core/src/lib.rs | 158 +- wgpu-core/src/pipeline.rs | 118 +- wgpu-core/src/present.rs | 270 +- wgpu-core/src/registry.rs | 132 + wgpu-core/src/resource.rs | 387 +- wgpu-core/src/storage.rs | 252 ++ wgpu-core/src/track/buffer.rs | 121 +- wgpu-core/src/track/metadata.rs | 95 +- wgpu-core/src/track/mod.rs | 101 +- wgpu-core/src/track/stateless.rs | 62 +- wgpu-core/src/track/texture.rs | 213 +- wgpu-hal/LICENSE.APACHE | 177 +- wgpu-hal/LICENSE.MIT | 22 +- wgpu-hal/src/dx11/device.rs | 6 +- wgpu-hal/src/dx11/mod.rs | 10 +- wgpu-hal/src/dx12/adapter.rs | 3 +- wgpu-hal/src/dx12/instance.rs | 3 +- wgpu-hal/src/dx12/mod.rs | 58 +- wgpu-hal/src/empty.rs | 14 +- wgpu-hal/src/gles/adapter.rs | 9 +- wgpu-hal/src/gles/egl.rs | 29 +- wgpu-hal/src/gles/mod.rs | 16 +- wgpu-hal/src/gles/queue.rs | 44 +- wgpu-hal/src/gles/web.rs | 33 +- wgpu-hal/src/lib.rs | 26 +- wgpu-hal/src/metal/mod.rs | 2 +- wgpu-hal/src/metal/surface.rs | 8 +- wgpu-hal/src/vulkan/adapter.rs | 8 +- wgpu-hal/src/vulkan/device.rs | 2 +- wgpu-hal/src/vulkan/instance.rs | 22 +- wgpu-hal/src/vulkan/mod.rs | 52 +- wgpu-types/LICENSE.APACHE | 177 +- wgpu-types/LICENSE.MIT | 22 +- wgpu/LICENSE.APACHE | 177 +- wgpu/LICENSE.MIT | 22 +- wgpu/src/backend/direct.rs | 46 +- wgpu/src/lib.rs | 105 +- 63 files changed, 10053 insertions(+), 8995 deletions(-) mode change 120000 => 100644 wgpu-core/LICENSE.APACHE mode change 120000 => 100644 wgpu-core/LICENSE.MIT create mode 100644 wgpu-core/src/device/device.rs create mode 100644 wgpu-core/src/device/global.rs create mode 100644 wgpu-core/src/global.rs create mode 100644 wgpu-core/src/hal_api.rs create mode 100644 wgpu-core/src/identity.rs create mode 100644 wgpu-core/src/registry.rs create mode 100644 wgpu-core/src/storage.rs mode change 120000 => 100644 wgpu-hal/LICENSE.APACHE mode change 120000 => 100644 wgpu-hal/LICENSE.MIT mode change 120000 => 100644 wgpu-types/LICENSE.APACHE mode change 120000 => 100644 wgpu-types/LICENSE.MIT mode change 120000 => 100644 wgpu/LICENSE.APACHE mode change 120000 => 100644 wgpu/LICENSE.MIT diff --git a/player/src/bin/play.rs b/player/src/bin/play.rs index fb2665e3a3..3623737b6c 100644 --- a/player/src/bin/play.rs +++ b/player/src/bin/play.rs @@ -43,12 +43,12 @@ fn main() { .build(&event_loop) .unwrap(); - let global = wgc::hub::Global::new( + let global = wgc::global::Global::new( "player", 
IdentityPassThroughFactory, wgt::InstanceDescriptor::default(), ); - let mut command_buffer_id_manager = wgc::hub::IdentityManager::default(); + let mut command_buffer_id_manager = wgc::identity::IdentityManager::default(); #[cfg(feature = "winit")] let surface = global.instance_create_surface( diff --git a/player/src/lib.rs b/player/src/lib.rs index 0ef6080b77..8702b982ec 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -15,7 +15,9 @@ use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path}; #[derive(Debug)] pub struct IdentityPassThrough(PhantomData); -impl wgc::hub::IdentityHandler for IdentityPassThrough { +impl wgc::identity::IdentityHandler + for IdentityPassThrough +{ type Input = I; fn process(&self, id: I, backend: wgt::Backend) -> I { let (index, epoch, _backend) = id.unzip(); @@ -26,7 +28,7 @@ impl wgc::hub::IdentityHandler for Ident pub struct IdentityPassThroughFactory; -impl wgc::hub::IdentityHandlerFactory +impl wgc::identity::IdentityHandlerFactory for IdentityPassThroughFactory { type Filter = IdentityPassThrough; @@ -34,25 +36,25 @@ impl wgc::hub::IdentityHandlerFactory IdentityPassThrough(PhantomData) } } -impl wgc::hub::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} +impl wgc::global::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} pub trait GlobalPlay { - fn encode_commands( + fn encode_commands( &self, encoder: wgc::id::CommandEncoderId, commands: Vec, ) -> wgc::id::CommandBufferId; - fn process( + fn process( &self, device: wgc::id::DeviceId, action: trace::Action, dir: &Path, - comb_manager: &mut wgc::hub::IdentityManager, + comb_manager: &mut wgc::identity::IdentityManager, ); } -impl GlobalPlay for wgc::hub::Global { - fn encode_commands( +impl GlobalPlay for wgc::global::Global { + fn encode_commands( &self, encoder: wgc::id::CommandEncoderId, commands: Vec, @@ -146,12 +148,12 @@ impl GlobalPlay for wgc::hub::Global { cmd_buf } - fn process( + fn process( &self, device: wgc::id::DeviceId, action: trace::Action, dir: &Path, - comb_manager: &mut wgc::hub::IdentityManager, + comb_manager: &mut wgc::identity::IdentityManager, ) { use wgc::device::trace::Action; log::info!("action {:?}", action); diff --git a/player/tests/test.rs b/player/tests/test.rs index 750b823648..7534e04b59 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -78,7 +78,7 @@ impl Test<'_> { fn run( self, dir: &Path, - global: &wgc::hub::Global, + global: &wgc::global::Global, adapter: wgc::id::AdapterId, test_num: u32, ) { @@ -98,7 +98,7 @@ impl Test<'_> { panic!("{:?}", e); } - let mut command_buffer_id_manager = wgc::hub::IdentityManager::default(); + let mut command_buffer_id_manager = wgc::identity::IdentityManager::default(); println!("\t\t\tRunning..."); for action in self.actions { wgc::gfx_select!(device => global.process(device, action, dir, &mut command_buffer_id_manager)); @@ -178,7 +178,7 @@ impl Corpus { let dir = path.parent().unwrap(); let corpus: Corpus = ron::de::from_reader(File::open(&path).unwrap()).unwrap(); - let global = wgc::hub::Global::new( + let global = wgc::global::Global::new( "test", IdentityPassThroughFactory, wgt::InstanceDescriptor { diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE deleted file mode 120000 index 7141cad5b2..0000000000 --- a/wgpu-core/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ 
b/wgpu-core/LICENSE.APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/wgpu-core/LICENSE.MIT b/wgpu-core/LICENSE.MIT deleted file mode 120000 index 6b8772d1a7..0000000000 --- a/wgpu-core/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-core/LICENSE.MIT b/wgpu-core/LICENSE.MIT new file mode 100644 index 0000000000..4699691b8e --- /dev/null +++ b/wgpu-core/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 The gfx-rs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index dbf96f0439..fe63d7081e 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -1,12 +1,16 @@ use crate::{ - device::{DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT}, + device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT}, error::{ErrorFormatter, PrettyError}, - hub::{HalApi, Resource}, - id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureId, TextureViewId, Valid}, + hal_api::HalApi, + id::{ + BindGroupId, BindGroupLayoutId, BufferId, PipelineLayoutId, SamplerId, TextureId, + TextureViewId, Valid, + }, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, + resource::{Resource, ResourceInfo}, track::{BindGroupStates, UsageConflict}, validation::{MissingBufferUsageError, MissingTextureUsageError}, - FastHashMap, Label, LifeGuard, MultiRefCount, Stored, + FastHashMap, Label, }; use arrayvec::ArrayVec; @@ -16,7 +20,7 @@ use serde::Deserialize; #[cfg(feature = "trace")] use serde::Serialize; -use std::{borrow::Cow, ops::Range}; +use std::{borrow::Cow, ops::Range, sync::Arc}; use thiserror::Error; @@ -439,30 +443,46 @@ pub(crate) type BindEntryMap = FastHashMap; /// - produced pipeline layouts /// - pipelines with implicit layouts #[derive(Debug)] -pub struct BindGroupLayout { - pub(crate) raw: A::BindGroupLayout, - pub(crate) device_id: Stored, - pub(crate) multi_ref_count: MultiRefCount, +pub struct BindGroupLayout { + pub(crate) raw: Option>, + pub(crate) device: Arc>, pub(crate) entries: BindEntryMap, - #[allow(unused)] - pub(crate) dynamic_count: usize, pub(crate) count_validator: BindingTypeMaxCountValidator, + pub(crate) info: ResourceInfo, #[cfg(debug_assertions)] pub(crate) label: String, } -impl Resource for BindGroupLayout { +impl Drop for BindGroupLayout { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device + .raw 
+ .as_ref() + .unwrap() + .destroy_bind_group_layout(raw); + } + } else { + panic!("BindGroupLayout raw cannot be destroyed because is still in use"); + } + } +} + +impl Resource for BindGroupLayout { const TYPE: &'static str = "BindGroupLayout"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { + fn label(&self) -> String { #[cfg(debug_assertions)] - return &self.label; + return self.label.clone(); #[cfg(not(debug_assertions))] - return ""; + return String::new(""); } } @@ -561,15 +581,33 @@ pub struct PipelineLayoutDescriptor<'a> { } #[derive(Debug)] -pub struct PipelineLayout { - pub(crate) raw: A::PipelineLayout, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, +pub struct PipelineLayout { + pub(crate) raw: Option>, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, pub(crate) bind_group_layout_ids: ArrayVec, { hal::MAX_BIND_GROUPS }>, pub(crate) push_constant_ranges: ArrayVec, } -impl PipelineLayout { +impl Drop for PipelineLayout { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device + .raw + .as_ref() + .unwrap() + .destroy_pipeline_layout(raw); + } + } else { + panic!("PipelineLayout raw cannot be destroyed because is still in use"); + } + } +} + +impl PipelineLayout { /// Validate push constants match up with expected ranges. pub(crate) fn validate_push_constant_ranges( &self, @@ -649,11 +687,11 @@ impl PipelineLayout { } } -impl Resource for PipelineLayout { +impl Resource for PipelineLayout { const TYPE: &'static str = "PipelineLayout"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -755,11 +793,12 @@ pub(crate) fn buffer_binding_type_alignment( } } +#[derive(Debug)] pub struct BindGroup { - pub(crate) raw: A::BindGroup, - pub(crate) device_id: Stored, + pub(crate) raw: Option>, + pub(crate) device: Arc>, pub(crate) layout_id: Valid, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, pub(crate) used: BindGroupStates, pub(crate) used_buffer_ranges: Vec, pub(crate) used_texture_ranges: Vec, @@ -769,6 +808,20 @@ pub struct BindGroup { pub(crate) late_buffer_binding_sizes: Vec, } +impl Drop for BindGroup { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_bind_group(raw); + } + } else { + panic!("BindGroup cannot be destroyed because is still in use"); + } + } +} + impl BindGroup { pub(crate) fn validate_dynamic_bindings( &self, @@ -819,11 +872,11 @@ impl BindGroup { } } -impl Resource for BindGroup { +impl Resource for BindGroup { const TYPE: &'static str = "BindGroup"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index fdcb60c52d..0cc13f6aea 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -1,10 +1,10 @@ use crate::{ binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout}, device::SHADER_STAGE_COUNT, - hub::{HalApi, Storage}, + hal_api::HalApi, id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid}, pipeline::LateSizedBufferGroup, - Stored, + storage::Storage, }; use arrayvec::ArrayVec; @@ -140,7 +140,7 @@ struct LateBufferBinding { #[derive(Debug, Default)] pub(super) 
struct EntryPayload { - pub(super) group_id: Option>, + pub(super) group_id: Option>, pub(super) dynamic_offsets: Vec, late_buffer_bindings: Vec, /// Since `LateBufferBinding` may contain information about the bindings @@ -236,10 +236,7 @@ impl Binder { debug_assert_eq!(A::VARIANT, bind_group_id.0.backend()); let payload = &mut self.payloads[index]; - payload.group_id = Some(Stored { - value: bind_group_id, - ref_count: bind_group.life_guard.add_ref(), - }); + payload.group_id = Some(bind_group_id); payload.dynamic_offsets.clear(); payload.dynamic_offsets.extend_from_slice(offsets); @@ -271,7 +268,7 @@ impl Binder { let payloads = &self.payloads; self.manager .list_active() - .map(move |index| payloads[index].group_id.as_ref().unwrap().value) + .map(move |index| *payloads[index].group_id.as_ref().unwrap()) } pub(super) fn invalid_mask(&self) -> BindGroupMask { diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index d92120d214..a9a004b0d2 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -90,16 +90,21 @@ use crate::{ RenderPassCompatibilityCheckType, RenderPassContext, SHADER_STAGE_COUNT, }, error::{ErrorFormatter, PrettyError}, - hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Resource, Storage, Token}, - id, + hal_api::HalApi, + hub::Hub, + id::{self, RenderBundleId}, + identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, resource, + resource::{Resource, ResourceInfo}, + storage::Storage, track::RenderBundleScope, validation::check_buffer_usage, - Label, LabelHelpers, LifeGuard, Stored, + Label, LabelHelpers, }; use arrayvec::ArrayVec; + use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range}; use thiserror::Error; @@ -253,14 +258,13 @@ impl RenderBundleEncoder { desc: &RenderBundleDescriptor, device: &Device, hub: &Hub, - token: &mut Token>, ) -> Result, RenderBundleError> { - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let pipeline_layout_guard = hub.pipeline_layouts.read(); + let bind_group_guard = hub.bind_groups.read(); + let pipeline_guard = hub.render_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); let mut state = State { trackers: RenderBundleScope::new( @@ -298,7 +302,7 @@ impl RenderBundleEncoder { .add_single(&*bind_group_guard, bind_group_id) .ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) .map_pass_err(scope)?; - self.check_valid_to_use(bind_group.device_id.value) + self.check_valid_to_use(bind_group.device.info.id()) .map_pass_err(scope)?; let max_bind_groups = device.limits.max_bind_groups; @@ -363,7 +367,7 @@ impl RenderBundleEncoder { .add_single(&*pipeline_guard, pipeline_id) .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; - self.check_valid_to_use(pipeline.device_id.value) + self.check_valid_to_use(pipeline.device.info.id()) .map_pass_err(scope)?; self.context @@ -380,7 +384,7 @@ impl RenderBundleEncoder { .map_pass_err(scope); } - let layout = &pipeline_layout_guard[pipeline.layout_id.value]; + 
let layout = &pipeline_layout_guard[pipeline.layout_id]; let pipeline_state = PipelineState::new(pipeline_id, pipeline, layout); commands.push(command); @@ -405,7 +409,7 @@ impl RenderBundleEncoder { .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX) .map_pass_err(scope)?; @@ -414,7 +418,7 @@ impl RenderBundleEncoder { Some(s) => offset + s.get(), None => buffer.size, }; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( buffer_id, offset..end, MemoryInitKind::NeedsInitializedMemory, @@ -433,7 +437,7 @@ impl RenderBundleEncoder { .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX) .map_pass_err(scope)?; @@ -442,7 +446,7 @@ impl RenderBundleEncoder { Some(s) => offset + s.get(), None => buffer.size, }; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( buffer_id, offset..end, MemoryInitKind::NeedsInitializedMemory, @@ -569,12 +573,12 @@ impl RenderBundleEncoder { .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT) .map_pass_err(scope)?; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( buffer_id, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, @@ -607,12 +611,12 @@ impl RenderBundleEncoder { .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT) .map_pass_err(scope)?; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( buffer_id, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, @@ -654,15 +658,12 @@ impl RenderBundleEncoder { }, is_depth_read_only: self.is_depth_read_only, is_stencil_read_only: self.is_stencil_read_only, - device_id: Stored { - value: id::Valid(self.parent_id), - ref_count: device.life_guard.add_ref(), - }, + device_id: id::Valid(self.parent_id), used: state.trackers, buffer_memory_init_actions, texture_memory_init_actions, context: self.context, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + info: ResourceInfo::new(desc.label.borrow_or_default()), }) } @@ -727,18 +728,19 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor>; //Note: here, `RenderBundle` is just wrapping a raw stream of render commands. 
// The plan is to back it by an actual Vulkan secondary buffer, D3D12 Bundle, // or Metal indirect command buffer. +#[derive(Debug)] pub struct RenderBundle { // Normalized command stream. It can be executed verbatim, // without re-binding anything on the pipeline change. base: BasePass, pub(super) is_depth_read_only: bool, pub(super) is_stencil_read_only: bool, - pub(crate) device_id: Stored, + pub(crate) device_id: id::Valid, pub(crate) used: RenderBundleScope, pub(super) buffer_memory_init_actions: Vec, pub(super) texture_memory_init_actions: Vec, pub(super) context: RenderPassContext, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, } unsafe impl Send for RenderBundle {} @@ -781,9 +783,12 @@ impl RenderBundle { let bind_group = bind_group_guard.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, + pipeline_layout_guard[pipeline_layout_id.unwrap()] + .raw + .as_ref() + .unwrap(), index as u32, - &bind_group.raw, + bind_group.raw.as_ref().unwrap(), &offsets[..num_dynamic_offsets as usize], ) }; @@ -791,9 +796,9 @@ impl RenderBundle { } RenderCommand::SetPipeline(pipeline_id) => { let pipeline = pipeline_guard.get(pipeline_id).unwrap(); - unsafe { raw.set_render_pipeline(&pipeline.raw) }; + unsafe { raw.set_render_pipeline(pipeline.raw.as_ref().unwrap()) }; - pipeline_layout_id = Some(pipeline.layout_id.value); + pipeline_layout_id = Some(pipeline.layout_id); } RenderCommand::SetIndexBuffer { buffer_id, @@ -808,7 +813,7 @@ impl RenderBundle { .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let bb = hal::BufferBinding { - buffer, + buffer: buffer.as_ref(), offset, size, }; @@ -827,7 +832,7 @@ impl RenderBundle { .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let bb = hal::BufferBinding { - buffer, + buffer: buffer.as_ref(), offset, size, }; @@ -849,7 +854,12 @@ impl RenderBundle { [(values_offset as usize)..values_end_offset]; unsafe { - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + raw.set_push_constants( + pipeline_layout.raw.as_ref().unwrap(), + stages, + offset, + data_slice, + ) } } else { super::push_constant_clear( @@ -858,7 +868,7 @@ impl RenderBundle { |clear_offset, clear_data| { unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), stages, clear_offset, clear_data, @@ -951,11 +961,11 @@ impl RenderBundle { } } -impl Resource for RenderBundle { +impl Resource for RenderBundle { const TYPE: &'static str = "RenderBundle"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -1102,7 +1112,7 @@ impl PipelineState { ) -> Self { Self { id: pipeline_id, - layout_id: pipeline.layout_id.value, + layout_id: pipeline.layout_id, steps: pipeline.vertex_steps.to_vec(), push_constant_ranges: layout.push_constant_ranges.iter().cloned().collect(), used_bind_groups: layout.bind_group_layout_ids.len(), diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 9cf99ac1d0..1ba02ef255 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -5,10 +5,13 @@ use crate::device::trace::Command as TraceCommand; use crate::{ command::CommandBuffer, get_lowest_common_denom, - hub::{self, Global, GlobalIdentityHandlerFactory, HalApi, Token}, + global::Global, + hal_api::HalApi, id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, 
TextureInitRange}, resource::{Texture, TextureClearMode}, + storage::Storage, track::{TextureSelector, TextureTracker}, }; @@ -75,18 +78,20 @@ impl Global { profiling::scope!("CommandEncoder::fill_buffer"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id) + + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; - let (buffer_guard, _) = hub.buffers.read(&mut token); + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let buffer_guard = hub.buffers.read(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let (dst_buffer, dst_pending) = cmd_buf + let (dst_buffer, dst_pending) = cmd_buf_data .trackers .buffers .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST) @@ -127,16 +132,16 @@ impl Global { } // Mark dest as initialized. - cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( dst, offset..end, MemoryInitKind::ImplicitlyInitialized, - )); + ), + ); // actual hal barrier & operation let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_buffers(dst_barrier.into_iter()); cmd_buf_raw.clear_buffer(dst_raw, offset..end); @@ -153,16 +158,17 @@ impl Global { profiling::scope!("CommandEncoder::clear_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.write(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id) + + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (texture_guard, _) = hub.textures.read(&mut token); + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + + let texture_guard = hub.textures.read(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearTexture { dst, subresource_range: *subresource_range, @@ -211,8 +217,8 @@ impl Global { }); } - let device = &device_guard[cmd_buf.device_id.value]; - + let device = &cmd_buf.device; + let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(); clear_texture( &*texture_guard, Valid(dst), @@ -220,16 +226,16 @@ impl Global { mip_range: subresource_mip_range, layer_range: subresource_layer_range, }, - cmd_buf.encoder.open(), - &mut cmd_buf.trackers.textures, + encoder, + &mut tracker.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) } } pub(crate) fn clear_texture( - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, 
dst_texture_id: Valid, range: TextureInitRange, encoder: &mut A::CommandEncoder, @@ -241,11 +247,13 @@ pub(crate) fn clear_texture( let dst_raw = dst_texture .inner + .as_ref() + .unwrap() .as_raw() .ok_or(ClearError::InvalidTexture(dst_texture_id.0))?; // Issue the right barrier. - let clear_usage = match dst_texture.clear_mode { + let clear_usage = match *dst_texture.clear_mode.read() { TextureClearMode::BufferCopy => hal::TextureUses::COPY_DST, TextureClearMode::RenderPass { is_color: false, .. @@ -283,7 +291,7 @@ pub(crate) fn clear_texture( } // Record actual clearing - match dst_texture.clear_mode { + match *dst_texture.clear_mode.read() { TextureClearMode::BufferCopy => clear_texture_via_buffer_copies::( &dst_texture.desc, alignments, @@ -302,7 +310,7 @@ pub(crate) fn clear_texture( Ok(()) } -fn clear_texture_via_buffer_copies( +fn clear_texture_via_buffer_copies( texture_desc: &wgt::TextureDescriptor<(), Vec>, alignments: &hal::Alignments, zero_buffer: &A::Buffer, // Buffer of size device::ZERO_BUFFER_SIZE @@ -394,7 +402,7 @@ fn clear_texture_via_buffer_copies( } } -fn clear_texture_via_render_passes( +fn clear_texture_via_render_passes( dst_texture: &Texture, range: TextureInitRange, is_color: bool, @@ -407,6 +415,7 @@ fn clear_texture_via_render_passes( height: dst_texture.desc.size.height, depth_or_array_layers: 1, // Only one layer is cleared at a time. }; + let clear_mode = &dst_texture.clear_mode.read(); for mip_level in range.mip_range { let extent = extent_base.mip_level_size(mip_level, dst_texture.desc.dimension); @@ -415,7 +424,12 @@ fn clear_texture_via_render_passes( let (color_attachments, depth_stencil_attachment) = if is_color { color_attachments_tmp = [Some(hal::ColorAttachment { target: hal::Attachment { - view: dst_texture.get_clear_view(mip_level, depth_or_layer), + view: Texture::get_clear_view( + clear_mode, + &dst_texture.desc, + mip_level, + depth_or_layer, + ), usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: None, @@ -428,7 +442,12 @@ fn clear_texture_via_render_passes( &[][..], Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: dst_texture.get_clear_view(mip_level, depth_or_layer), + view: Texture::get_clear_view( + clear_mode, + &dst_texture.desc, + mip_level, + depth_or_layer, + ), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops: hal::AttachmentOps::STORE, diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index e369823775..1370a9dcbb 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -11,11 +11,14 @@ use crate::{ }, device::{MissingDownlevelFlags, MissingFeatures}, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, id, + identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, pipeline, resource::{self, Buffer, Texture}, + storage::Storage, track::{Tracker, UsageConflict, UsageScope}, validation::{check_buffer_usage, MissingBufferUsageError}, Label, @@ -333,36 +336,35 @@ impl Global { let init_scope = PassErrorScope::Pass(encoder_id); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); // Spell out the type, to placate rust-analyzer. 
// https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &mut CommandBuffer = - CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id) - .map_pass_err(init_scope)?; + let cmd_buf: &CommandBuffer = + CommandBuffer::get_encoder(&*cmd_buf_guard, encoder_id).map_pass_err(init_scope)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); // will be reset to true if recording is done without errors - cmd_buf.status = CommandEncoderStatus::Error; - let raw = cmd_buf.encoder.open(); + let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); - let device = &device_guard[cmd_buf.device_id.value]; + *status = CommandEncoderStatus::Error; + let raw = encoder.open(); + let device = &cmd_buf.device; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunComputePass { base: BasePass::from_ref(base), }); } - let (_, mut token) = hub.render_bundles.read(&mut token); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let pipeline_layout_guard = hub.pipeline_layouts.read(); + let bind_group_guard = hub.bind_groups.read(); + let pipeline_guard = hub.compute_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); let mut state = State { binder: Binder::new(), @@ -375,7 +377,7 @@ impl Global { let mut string_offset = 0; let mut active_query = None; - cmd_buf.trackers.set_size( + tracker.set_size( Some(&*buffer_guard), Some(&*texture_guard), None, @@ -421,8 +423,7 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &BindGroup = cmd_buf - .trackers + let bind_group: &BindGroup = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(ComputePassErrorInner::InvalidBindGroup(bind_group_id)) @@ -431,10 +432,12 @@ impl Global { .validate_dynamic_bindings(index, &temp_offsets, &cmd_buf.limits) .map_pass_err(scope)?; - cmd_buf.buffer_memory_init_actions.extend( + buffer_memory_init_actions.extend( bind_group.used_buffer_ranges.iter().filter_map( |action| match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), + Ok(buffer) => { + buffer.initialization_status.read().check_action(action) + } Err(_) => None, }, ), @@ -442,9 +445,7 @@ impl Global { for action in bind_group.used_texture_ranges.iter() { pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), + texture_memory_actions.register_init_action(action, &texture_guard), ); } @@ -456,10 +457,15 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw; + let pipeline_layout = pipeline_layout_guard[pipeline_layout_id.unwrap()] + .raw + .as_ref() + .unwrap(); for (i, e) in entries.iter().enumerate() { - let raw_bg = &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; + let raw_bg = 
bind_group_guard[*e.group_id.as_ref().unwrap()] + .raw + .as_ref() + .unwrap(); unsafe { raw.set_bind_group( pipeline_layout, @@ -476,33 +482,34 @@ impl Global { state.pipeline = Some(pipeline_id); - let pipeline: &pipeline::ComputePipeline = cmd_buf - .trackers + let pipeline: &pipeline::ComputePipeline = tracker .compute_pipelines .add_single(&*pipeline_guard, pipeline_id) .ok_or(ComputePassErrorInner::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; unsafe { - raw.set_compute_pipeline(&pipeline.raw); + raw.set_compute_pipeline(pipeline.raw.as_ref().unwrap()); } // Rebind resources - if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; + if state.binder.pipeline_layout_id != Some(pipeline.layout_id) { + let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; let (start_index, entries) = state.binder.change_pipeline_layout( &*pipeline_layout_guard, - pipeline.layout_id.value, + pipeline.layout_id, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] + .raw + .as_ref() + .unwrap(); unsafe { raw.set_bind_group( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -523,7 +530,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), wgt::ShaderStages::COMPUTE, clear_offset, clear_data, @@ -566,7 +573,7 @@ impl Global { unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), wgt::ShaderStages::COMPUTE, offset, data_slice, @@ -583,15 +590,15 @@ impl Global { pending_discard_init_fixups.drain(..), raw, &texture_guard, - &mut cmd_buf.trackers.textures, - device, + &mut tracker.textures, + &device, ); state.is_ready().map_pass_err(scope)?; state .flush_states( raw, - &mut cmd_buf.trackers, + tracker, &*bind_group_guard, &*buffer_guard, &*texture_guard, @@ -656,8 +663,8 @@ impl Global { let stride = 3 * 4; // 3 integers, x/y/z group size - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( buffer_id, offset..(offset + stride), MemoryInitKind::NeedsInitializedMemory, @@ -667,7 +674,7 @@ impl Global { state .flush_states( raw, - &mut cmd_buf.trackers, + tracker, &*bind_group_guard, &*buffer_guard, &*texture_guard, @@ -717,8 +724,7 @@ impl Global { .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES) .map_pass_err(scope)?; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id)) @@ -734,8 +740,7 @@ impl Global { } => { let scope = PassErrorScope::BeginPipelineStatisticsQuery; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id)) @@ -763,7 +768,7 @@ impl Global { unsafe { raw.end_compute_pass(); } - cmd_buf.status = CommandEncoderStatus::Recording; + *status = CommandEncoderStatus::Recording; // There can be 
entries left in pending_discard_init_fixups if a bind // group was set, but not used (i.e. no Dispatch occurred) @@ -774,8 +779,8 @@ impl Global { pending_discard_init_fixups.into_iter(), raw, &texture_guard, - &mut cmd_buf.trackers.textures, - device, + &mut tracker.textures, + &device, ); Ok(()) diff --git a/wgpu-core/src/command/memory_init.rs b/wgpu-core/src/command/memory_init.rs index 52735fec51..75ffcfcbdd 100644 --- a/wgpu-core/src/command/memory_init.rs +++ b/wgpu-core/src/command/memory_init.rs @@ -4,10 +4,11 @@ use hal::CommandEncoder; use crate::{ device::Device, - hub::{HalApi, Storage}, + hal_api::HalApi, id::{self, TextureId}, init_tracker::*, resource::{Buffer, Texture}, + storage::Storage, track::{TextureTracker, Tracker}, FastHashMap, }; @@ -49,7 +50,7 @@ impl CommandBufferTextureMemoryActions { // Returns previously discarded surface that need to be initialized *immediately* now. // Only returns a non-empty list if action is MemoryInitKind::NeedsInitializedMemory. #[must_use] - pub(crate) fn register_init_action( + pub(crate) fn register_init_action( &mut self, action: &TextureInitTrackerAction, texture_guard: &Storage, TextureId>, @@ -65,7 +66,7 @@ impl CommandBufferTextureMemoryActions { // mean splitting up the action which is more than we'd win here. self.init_actions .extend(match texture_guard.get(action.id) { - Ok(texture) => texture.initialization_status.check_action(action), + Ok(texture) => texture.initialization_status.read().check_action(action), Err(_) => return immediately_necessary_clears, // texture no longer exists }); @@ -108,7 +109,7 @@ impl CommandBufferTextureMemoryActions { // Shortcut for register_init_action when it is known that the action is an // implicit init, not requiring any immediate resource init. - pub(crate) fn register_implicit_init( + pub(crate) fn register_implicit_init( &mut self, id: id::Valid, range: TextureInitRange, @@ -151,7 +152,7 @@ pub(crate) fn fixup_discarded_surfaces< encoder, texture_tracker, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -163,7 +164,7 @@ impl BakedCommands { pub(crate) fn initialize_buffer_memory( &mut self, device_tracker: &mut Tracker, - buffer_guard: &mut Storage, id::BufferId>, + buffer_guard: &Storage, id::BufferId>, ) -> Result<(), DestroyedBufferError> { // Gather init ranges for each buffer so we can collapse them. // It is not possible to do this at an earlier point since previously @@ -171,8 +172,9 @@ impl BakedCommands { let mut uninitialized_ranges_per_buffer = FastHashMap::default(); for buffer_use in self.buffer_memory_init_actions.drain(..) 
{ let buffer = buffer_guard - .get_mut(buffer_use.id) + .get(buffer_use.id) .map_err(|_| DestroyedBufferError(buffer_use.id))?; + let mut initialization_status = buffer.initialization_status.write(); // align the end to 4 let end_remainder = buffer_use.range.end % wgt::COPY_BUFFER_ALIGNMENT; @@ -181,9 +183,7 @@ impl BakedCommands { } else { buffer_use.range.end + wgt::COPY_BUFFER_ALIGNMENT - end_remainder }; - let uninitialized_ranges = buffer - .initialization_status - .drain(buffer_use.range.start..end); + let uninitialized_ranges = initialization_status.drain(buffer_use.range.start..end); match buffer_use.kind { MemoryInitKind::ImplicitlyInitialized => {} @@ -226,14 +226,14 @@ impl BakedCommands { .1; let buffer = buffer_guard - .get_mut(buffer_id) + .get(buffer_id) .map_err(|_| DestroyedBufferError(buffer_id))?; let raw_buf = buffer.raw.as_ref().ok_or(DestroyedBufferError(buffer_id))?; unsafe { self.encoder.transition_buffers( transition - .map(|pending| pending.into_hal(buffer)) + .map(|pending| pending.into_hal(&buffer)) .into_iter(), ); } @@ -269,18 +269,17 @@ impl BakedCommands { pub(crate) fn initialize_texture_memory( &mut self, device_tracker: &mut Tracker, - texture_guard: &mut Storage, TextureId>, + texture_guard: &Storage, TextureId>, device: &Device, ) -> Result<(), DestroyedTextureError> { let mut ranges: Vec = Vec::new(); for texture_use in self.texture_memory_actions.drain_init_actions() { let texture = texture_guard - .get_mut(texture_use.id) + .get(texture_use.id) .map_err(|_| DestroyedTextureError(texture_use.id))?; - + let mut initialization_status = texture.initialization_status.write(); let use_range = texture_use.range; - let affected_mip_trackers = texture - .initialization_status + let affected_mip_trackers = initialization_status .mips .iter_mut() .enumerate() @@ -314,7 +313,7 @@ impl BakedCommands { &mut self.encoder, &mut device_tracker.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -325,10 +324,11 @@ impl BakedCommands { // after its execution. 
for surface_discard in self.texture_memory_actions.discards.iter() { let texture = texture_guard - .get_mut(surface_discard.texture) + .get(surface_discard.texture) .map_err(|_| DestroyedTextureError(surface_discard.texture))?; texture .initialization_status + .write() .discard(surface_discard.mip_level, surface_discard.layer); } diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 899cf30d59..e6f9364913 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -9,6 +9,7 @@ mod render; mod transfer; use std::slice; +use std::sync::Arc; pub(crate) use self::clear::clear_texture; pub use self::{ @@ -17,17 +18,24 @@ pub use self::{ use self::memory_init::CommandBufferTextureMemoryActions; +use crate::device::Device; use crate::error::{ErrorFormatter, PrettyError}; +use crate::id::CommandBufferId; use crate::init_tracker::BufferInitTrackerAction; +use crate::resource::{Resource, ResourceInfo}; use crate::track::{Tracker, UsageScope}; use crate::{ - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, id, + identity::GlobalIdentityHandlerFactory, resource::{Buffer, Texture}, - Label, Stored, + storage::Storage, + Label, }; use hal::CommandEncoder as _; +use parking_lot::Mutex; use thiserror::Error; #[cfg(feature = "trace")] @@ -36,13 +44,13 @@ use crate::device::trace::Command as TraceCommand; const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64]; #[derive(Debug)] -enum CommandEncoderStatus { +pub(crate) enum CommandEncoderStatus { Recording, Finished, Error, } -struct CommandEncoder { +pub(crate) struct CommandEncoder { raw: A::CommandEncoder, list: Vec, is_open: bool, @@ -92,49 +100,110 @@ pub struct BakedCommands { pub(crate) struct DestroyedBufferError(pub id::BufferId); pub(crate) struct DestroyedTextureError(pub id::TextureId); -pub struct CommandBuffer { +pub struct CommandBufferMutable { encoder: CommandEncoder, status: CommandEncoderStatus, - pub(crate) device_id: Stored, pub(crate) trackers: Tracker, buffer_memory_init_actions: Vec, texture_memory_actions: CommandBufferTextureMemoryActions, - limits: wgt::Limits, - support_clear_texture: bool, #[cfg(feature = "trace")] pub(crate) commands: Option>, } +impl CommandBufferMutable { + pub(crate) fn open_encoder_and_tracker(&mut self) -> (&mut A::CommandEncoder, &mut Tracker) { + let encoder = self.encoder.open(); + let tracker = &mut self.trackers; + (encoder, tracker) + } + pub(crate) fn raw_mut( + &mut self, + ) -> ( + &mut CommandEncoder, + &mut CommandEncoderStatus, + &mut Tracker, + &mut Vec, + &mut CommandBufferTextureMemoryActions, + ) { + let encoder = &mut self.encoder; + let status = &mut self.status; + let tracker = &mut self.trackers; + let buffer_memory_init_actions = &mut self.buffer_memory_init_actions; + let texture_memory_actions = &mut self.texture_memory_actions; + ( + encoder, + status, + tracker, + buffer_memory_init_actions, + texture_memory_actions, + ) + } +} + +pub struct CommandBuffer { + pub(crate) device: Arc>, + limits: wgt::Limits, + support_clear_texture: bool, + pub(crate) info: ResourceInfo, + pub(crate) data: Mutex>>, +} + +impl Drop for CommandBuffer { + fn drop(&mut self) { + if self.data.lock().is_none() { + return; + } + let mut baked = self.into_baked(); + unsafe { + baked.encoder.reset_all(baked.list.into_iter()); + } + unsafe { + use hal::Device; + self.device + .raw + .as_ref() + .unwrap() + .destroy_command_encoder(baked.encoder); + } + } +} + impl CommandBuffer { pub(crate) fn new( encoder: 
A::CommandEncoder, - device_id: Stored, - limits: wgt::Limits, - _downlevel: wgt::DownlevelCapabilities, - features: wgt::Features, + device: &Arc>, #[cfg(feature = "trace")] enable_tracing: bool, label: &Label, ) -> Self { + let label = crate::LabelHelpers::borrow_option(label).map(|s| s.to_string()); CommandBuffer { - encoder: CommandEncoder { - raw: encoder, - is_open: false, - list: Vec::new(), - label: crate::LabelHelpers::borrow_option(label).map(|s| s.to_string()), - }, - status: CommandEncoderStatus::Recording, - device_id, - trackers: Tracker::new(), - buffer_memory_init_actions: Default::default(), - texture_memory_actions: Default::default(), - limits, - support_clear_texture: features.contains(wgt::Features::CLEAR_TEXTURE), - #[cfg(feature = "trace")] - commands: if enable_tracing { - Some(Vec::new()) - } else { - None - }, + device: device.clone(), + limits: device.limits.clone(), + support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE), + info: ResourceInfo::new( + label + .as_ref() + .unwrap_or(&String::from("")) + .as_str(), + ), + data: Mutex::new(Some(CommandBufferMutable { + encoder: CommandEncoder { + raw: encoder, + is_open: false, + list: Vec::new(), + label, + }, + status: CommandEncoderStatus::Recording, + trackers: Tracker::new(), + buffer_memory_init_actions: Default::default(), + texture_memory_actions: Default::default(), + #[cfg(feature = "trace")] + commands: if enable_tracing { + Some(Vec::new()) + } else { + None + }, + })), } } @@ -179,12 +248,12 @@ impl CommandBuffer { profiling::scope!("drain_barriers"); let buffer_barriers = base.buffers.drain().map(|pending| { - let buf = unsafe { &buffer_guard.get_unchecked(pending.id) }; - pending.into_hal(buf) + let buf = unsafe { buffer_guard.get_unchecked(pending.id) }; + pending.into_hal(&buf) }); let texture_barriers = base.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) + pending.into_hal(&tex) }); unsafe { @@ -195,13 +264,13 @@ impl CommandBuffer { } impl CommandBuffer { - fn get_encoder_mut( - storage: &mut Storage, + fn get_encoder( + storage: &Storage, id: id::CommandEncoderId, - ) -> Result<&mut Self, CommandEncoderError> { - match storage.get_mut(id) { - Ok(cmd_buf) => match cmd_buf.status { - CommandEncoderStatus::Recording => Ok(cmd_buf), + ) -> Result<&Self, CommandEncoderError> { + match storage.get(id) { + Ok(cmd_buf) => match cmd_buf.data.lock().as_ref().unwrap().status { + CommandEncoderStatus::Recording => Ok(&cmd_buf), CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), }, @@ -210,32 +279,45 @@ impl CommandBuffer { } pub fn is_finished(&self) -> bool { - match self.status { + match self.data.lock().as_ref().unwrap().status { CommandEncoderStatus::Finished => true, _ => false, } } - pub(crate) fn into_baked(self) -> BakedCommands { + pub(crate) fn into_baked(&mut self) -> BakedCommands { + let data = self.data.lock().take().unwrap(); BakedCommands { - encoder: self.encoder.raw, - list: self.encoder.list, - trackers: self.trackers, - buffer_memory_init_actions: self.buffer_memory_init_actions, - texture_memory_actions: self.texture_memory_actions, + encoder: data.encoder.raw, + list: data.encoder.list, + trackers: data.trackers, + buffer_memory_init_actions: data.buffer_memory_init_actions, + texture_memory_actions: data.texture_memory_actions, + } + } + + pub(crate) fn from_arc_into_baked(self: Arc) -> BakedCommands { + 
if let Ok(mut command_buffer) = Arc::try_unwrap(self) { + command_buffer.into_baked() + } else { + panic!("CommandBuffer cannot be destroyed because is still in use"); } } } -impl crate::hub::Resource for CommandBuffer { +impl Resource for CommandBuffer { const TYPE: &'static str = "CommandBuffer"; - fn life_guard(&self) -> &crate::LifeGuard { - unreachable!() + fn info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { - self.encoder.label.as_ref().map_or("", |s| s.as_str()) + fn label(&self) -> String { + if let Some(label) = &self.data.lock().as_ref().unwrap().encoder.label { + label.clone() + } else { + String::new() + } } } @@ -340,29 +422,31 @@ impl Global { &self, encoder_id: id::CommandEncoderId, _desc: &wgt::CommandBufferDescriptor, } -impl QueryResetMap { +impl QueryResetMap { pub fn new() -> Self { Self { map: FastHashMap::default(), @@ -65,7 +68,10 @@ impl QueryResetMap { // We've hit the end of a run, dispatch a reset (Some(start), false) => { run_start = None; - unsafe { raw_encoder.reset_queries(&query_set.raw, start..idx as u32) }; + unsafe { + raw_encoder + .reset_queries(query_set.raw.as_ref().unwrap(), start..idx as u32) + }; } // We're starting a run (None, true) => { @@ -204,7 +210,7 @@ impl QuerySet { }); } - Ok(&self.raw) + Ok(self.raw.as_ref().unwrap()) } pub(super) fn validate_and_write_timestamp( @@ -225,7 +231,8 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder + .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); } raw_encoder.write_timestamp(query_set, query_index); } @@ -259,7 +266,8 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. 
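
// Aside: a minimal, self-contained sketch (toy types, std's Mutex instead of
// parking_lot, not part of the patch itself) of the ownership pattern the new
// `CommandBuffer` uses above: mutable recording state behind `Mutex<Option<_>>`
// that is taken out exactly once, plus `Arc::try_unwrap` so a shared handle can
// only be consumed when no other clone is alive.
use std::sync::{Arc, Mutex};

struct RecordingState {
    commands: Vec<String>,
}

struct SharedEncoder {
    data: Mutex<Option<RecordingState>>,
}

impl SharedEncoder {
    // Move the recording state out, leaving `None` behind; a second call would
    // panic, mirroring the "already consumed" invariant.
    fn into_baked(&self) -> Vec<String> {
        self.data.lock().unwrap().take().unwrap().commands
    }

    // Consuming a shared handle succeeds only if this is the last `Arc` clone.
    fn from_arc_into_baked(this: Arc<Self>) -> Vec<String> {
        match Arc::try_unwrap(this) {
            Ok(encoder) => encoder.into_baked(),
            Err(_) => panic!("encoder is still in use"),
        }
    }
}

fn main() {
    let encoder = Arc::new(SharedEncoder {
        data: Mutex::new(Some(RecordingState {
            commands: vec!["copy".to_owned(), "draw".to_owned()],
        })),
    });
    assert_eq!(SharedEncoder::from_arc_into_baked(encoder).len(), 2);
}
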
if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder + .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); } raw_encoder.begin_query(query_set, query_index); } @@ -277,7 +285,7 @@ pub(super) fn end_pipeline_statistics_query( // We can unwrap here as the validity was validated when the active query was set let query_set = storage.get(query_set_id).unwrap(); - unsafe { raw_encoder.end_query(&query_set.raw, query_index) }; + unsafe { raw_encoder.end_query(query_set.raw.as_ref().unwrap(), query_index) }; Ok(()) } else { @@ -293,24 +301,25 @@ impl Global { query_index: u32, ) -> Result<(), QueryError> { let hub = A::hub(self); - let mut token = Token::root(); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, _) = hub.query_sets.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?; - let raw_encoder = cmd_buf.encoder.open(); + let cmd_buf = CommandBuffer::get_encoder(&cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, _, tracker, _, _) = cmd_buf_data.raw_mut(); + let raw_encoder = encoder.open(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::WriteTimestamp { query_set_id, query_index, }); } - let query_set = cmd_buf - .trackers + let query_set_guard = hub.query_sets.read(); + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(QueryError::InvalidQuerySet(query_set_id))?; @@ -330,17 +339,17 @@ impl Global { destination_offset: BufferAddress, ) -> Result<(), QueryError> { let hub = A::hub(self); - let mut token = Token::root(); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, _) = hub.buffers.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?; - let raw_encoder = cmd_buf.encoder.open(); + let cmd_buf = CommandBuffer::get_encoder(&cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, _, tracker, buffer_memory_init_actions, _) = cmd_buf_data.raw_mut(); + let raw_encoder = encoder.open(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ResolveQuerySet { query_set_id, start_query, @@ -353,15 +362,14 @@ impl Global { if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } - - let query_set = cmd_buf - .trackers + let query_set_guard = hub.query_sets.read(); + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(QueryError::InvalidQuerySet(query_set_id))?; - let (dst_buffer, dst_pending) = cmd_buf - .trackers + let buffer_guard = hub.buffers.read(); + let (dst_buffer, dst_pending) = tracker .buffers .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) .ok_or(QueryError::InvalidBuffer(destination))?; @@ -404,18 +412,16 @@ impl Global { .into()); } - cmd_buf - .buffer_memory_init_actions - 
.extend(dst_buffer.initialization_status.create_action( - destination, - buffer_start_offset..buffer_end_offset, - MemoryInitKind::ImplicitlyInitialized, - )); + buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( + destination, + buffer_start_offset..buffer_end_offset, + MemoryInitKind::ImplicitlyInitialized, + )); unsafe { raw_encoder.transition_buffers(dst_barrier.into_iter()); raw_encoder.copy_query_results( - &query_set.raw, + query_set.raw.as_ref().unwrap(), start_query..end_query, dst_buffer.raw.as_ref().unwrap(), destination_offset, diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 5de2a48ff6..05d44d8063 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -14,16 +14,19 @@ use crate::{ RenderPassCompatibilityCheckType, RenderPassCompatibilityError, RenderPassContext, }, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, id, + identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, resource::{self, Buffer, Texture, TextureView, TextureViewNotRenderableReason}, - track::{TextureSelector, UsageConflict, UsageScope}, + storage::Storage, + track::{TextureSelector, Tracker, UsageConflict, UsageScope}, validation::{ check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError, }, - Label, Stored, + Label, }; use arrayvec::ArrayVec; @@ -41,7 +44,9 @@ use serde::Serialize; use std::{borrow::Cow, fmt, iter, marker::PhantomData, mem, num::NonZeroU32, ops::Range, str}; -use super::{memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions}; +use super::{ + memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions, CommandEncoder, +}; /// Operation to perform to the output attachment at the start of a renderpass. 
#[repr(C)] @@ -637,12 +642,12 @@ where } struct RenderAttachment<'a> { - texture_id: &'a Stored, + texture_id: &'a id::Valid, selector: &'a TextureSelector, usage: hal::TextureUses, } -impl TextureView { +impl TextureView { fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { RenderAttachment { texture_id: &self.parent_id, @@ -681,7 +686,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if channel.load_op == LoadOp::Load { pending_discard_init_fixups.extend(texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id.value.0, + id: view.parent_id.0, range: TextureInitRange::from(view.selector.clone()), // Note that this is needed even if the target is discarded, kind: MemoryInitKind::NeedsInitializedMemory, @@ -691,7 +696,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if channel.store_op == StoreOp::Store { // Clear + Store texture_memory_actions.register_implicit_init( - view.parent_id.value, + view.parent_id, TextureInitRange::from(view.selector.clone()), texture_guard, ); @@ -701,7 +706,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // discard right away be alright since the texture can't be used // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { - texture: view.parent_id.value.0, + texture: view.parent_id.0, mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -713,7 +718,9 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { label: Option<&str>, color_attachments: &[Option], depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>, - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + tracker: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, view_guard: &'a Storage, id::TextureViewId>, buffer_guard: &'a Storage, id::BufferId>, texture_guard: &'a Storage, id::TextureId>, @@ -806,8 +813,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut depth_stencil = None; if let Some(at) = depth_stencil_attachment { - let view: &TextureView = cmd_buf - .trackers + let view: &TextureView = tracker .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -827,7 +833,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { { Self::add_pass_texture_init_actions( &at.depth, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, view, texture_guard, &mut pending_discard_init_fixups, @@ -835,7 +841,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if !ds_aspects.contains(hal::FormatAspects::DEPTH) { Self::add_pass_texture_init_actions( &at.stencil, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, view, texture_guard, &mut pending_discard_init_fixups, @@ -866,9 +872,9 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { at.depth.load_op == LoadOp::Load || at.stencil.load_op == LoadOp::Load; if need_init_beforehand { pending_discard_init_fixups.extend( - cmd_buf.texture_memory_actions.register_init_action( + texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id.value.0, + id: view.parent_id.0, range: TextureInitRange::from(view.selector.clone()), kind: MemoryInitKind::NeedsInitializedMemory, }, @@ -887,8 +893,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // (possible optimization: Delay and potentially drop this zeroing) if at.depth.store_op != at.stencil.store_op { if !need_init_beforehand { - cmd_buf.texture_memory_actions.register_implicit_init( - view.parent_id.value, + texture_memory_actions.register_implicit_init( + 
view.parent_id, TextureInitRange::from(view.selector.clone()), texture_guard, ); @@ -904,7 +910,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if at.depth.store_op == StoreOp::Discard { // Both are discarded using the regular path. discarded_surfaces.push(TextureSurfaceDiscard { - texture: view.parent_id.value.0, + texture: view.parent_id.0, mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -922,7 +928,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil = Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: &view.raw, + view: view.raw.as_ref().unwrap().as_ref(), usage, }, depth_ops: at.depth.hal_ops(), @@ -938,8 +944,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { colors.push(None); continue; }; - let color_view: &TextureView = cmd_buf - .trackers + let color_view: &TextureView = tracker .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -964,7 +969,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { Self::add_pass_texture_init_actions( &at.channel, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, color_view, texture_guard, &mut pending_discard_init_fixups, @@ -974,8 +979,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut hal_resolve_target = None; if let Some(resolve_target) = at.resolve_target { - let resolve_view: &TextureView = cmd_buf - .trackers + let resolve_view: &TextureView = tracker .views .add_single(view_guard, resolve_target) .ok_or(RenderPassErrorInner::InvalidAttachment(resolve_target))?; @@ -1026,8 +1030,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }); } - cmd_buf.texture_memory_actions.register_implicit_init( - resolve_view.parent_id.value, + texture_memory_actions.register_implicit_init( + resolve_view.parent_id, TextureInitRange::from(resolve_view.selector.clone()), texture_guard, ); @@ -1035,14 +1039,14 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { .push(resolve_view.to_render_attachment(hal::TextureUses::COLOR_TARGET)); hal_resolve_target = Some(hal::Attachment { - view: &resolve_view.raw, + view: resolve_view.raw.as_ref().unwrap().as_ref(), usage: hal::TextureUses::COLOR_TARGET, }); } colors.push(Some(hal::ColorAttachment { target: hal::Attachment { - view: &color_view.raw, + view: color_view.raw.as_ref().unwrap(), usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: hal_resolve_target, @@ -1087,7 +1091,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { multiview, }; unsafe { - cmd_buf.encoder.raw.begin_render_pass(&hal_desc); + encoder.raw.begin_render_pass(&hal_desc); }; Ok(Self { @@ -1115,10 +1119,10 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } for ra in self.render_attachments { - if !texture_guard.contains(ra.texture_id.value.0) { + if !texture_guard.contains(ra.texture_id.0) { return Err(RenderPassErrorInner::SurfaceTextureDropped); } - let texture = &texture_guard[ra.texture_id.value]; + let texture = &texture_guard[*ra.texture_id]; check_texture_usage(texture.desc.usage, TextureUsages::RENDER_ATTACHMENT)?; // the tracker set of the pass is always in "extend" mode @@ -1127,9 +1131,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { .textures .merge_single( texture_guard, - ra.texture_id.value, + *ra.texture_id, Some(ra.selector.clone()), - &ra.texture_id.ref_count, ra.usage, ) .map_err(UsageConflict::from)? 
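
// Aside: a small self-contained illustration (toy types, not wgpu-core's, and
// not part of the patch itself) of the `Option`-wrapped raw-handle pattern
// visible throughout the hunks above (`view.raw.as_ref().unwrap()`,
// `query_set.raw.as_ref().unwrap()`, ...): the hal object is stored as
// `Option<Raw>` so destruction can `take()` it out of a wrapper that may still
// be shared, while ordinary use sites unwrap a borrowed reference.
struct RawView(u32); // stand-in for a hal texture view

struct ToyView {
    raw: Option<RawView>,
}

impl ToyView {
    // Use sites borrow the raw handle; this panics if the view was destroyed.
    fn raw_handle(&self) -> &RawView {
        self.raw.as_ref().unwrap()
    }

    // Destruction moves the raw handle out, leaving `None` behind so a later
    // use can detect (or panic on) use-after-destroy.
    fn destroy(&mut self) -> Option<RawView> {
        self.raw.take()
    }
}

fn main() {
    let mut view = ToyView { raw: Some(RawView(7)) };
    assert_eq!(view.raw_handle().0, 7);
    let raw = view.destroy();
    assert!(raw.is_some() && view.raw.is_none());
}
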
@@ -1164,7 +1167,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { color_attachments: &[], depth_stencil_attachment: Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: &view.raw, + view: view.raw.as_ref().unwrap().as_ref(), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops, @@ -1211,24 +1214,25 @@ impl Global { let init_scope = PassErrorScope::Pass(encoder_id); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); let (scope, query_reset_state, pending_discard_init_fixups) = { - let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); + let cmb_guard = hub.command_buffers.read(); // Spell out the type, to placate rust-analyzer. // https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &mut CommandBuffer = - CommandBuffer::get_encoder_mut(&mut *cmb_guard, encoder_id) - .map_pass_err(init_scope)?; + let cmd_buf: &CommandBuffer = + CommandBuffer::get_encoder(&*cmb_guard, encoder_id).map_pass_err(init_scope)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); // close everything while the new command encoder is filled - cmd_buf.encoder.close(); + encoder.close(); // will be reset to true if recording is done without errors - cmd_buf.status = CommandEncoderStatus::Error; + *status = CommandEncoderStatus::Error; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunRenderPass { base: BasePass::from_ref(base), target_colors: color_attachments.to_vec(), @@ -1236,17 +1240,17 @@ impl Global { }); } - let device = &device_guard[cmd_buf.device_id.value]; - cmd_buf.encoder.open_pass(base.label); + let device = &cmd_buf.device; + encoder.open_pass(base.label); - let (bundle_guard, mut token) = hub.render_bundles.read(&mut token); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (render_pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (view_guard, _) = hub.texture_views.read(&mut token); + let bundle_guard = hub.render_bundles.read(); + let pipeline_layout_guard = hub.pipeline_layouts.read(); + let bind_group_guard = hub.bind_groups.read(); + let render_pipeline_guard = hub.render_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + let view_guard = hub.texture_views.read(); log::trace!( "Encoding render pass begin in command buffer {:?}", @@ -1254,18 +1258,20 @@ impl Global { ); let mut info = RenderPassInfo::start( - device, + &device, base.label, color_attachments, depth_stencil_attachment, - cmd_buf, + encoder, + tracker, + texture_memory_actions, &*view_guard, &*buffer_guard, &*texture_guard, ) .map_pass_err(init_scope)?; - cmd_buf.trackers.set_size( + tracker.set_size( Some(&*buffer_guard), Some(&*texture_guard), Some(&*view_guard), @@ -1277,7 +1283,7 @@ impl Global { Some(&*query_set_guard), ); - let raw = &mut cmd_buf.encoder.raw; + let raw = &mut encoder.raw; let mut state 
= State { pipeline_flags: PipelineFlags::empty(), @@ -1319,8 +1325,7 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &crate::binding_model::BindGroup = cmd_buf - .trackers + let bind_group: &crate::binding_model::BindGroup = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) @@ -1338,19 +1343,19 @@ impl Global { //Note: stateless trackers are not merged: the lifetime reference // is held to the bind group itself. - cmd_buf.buffer_memory_init_actions.extend( + buffer_memory_init_actions.extend( bind_group.used_buffer_ranges.iter().filter_map(|action| { match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), + Ok(buffer) => { + buffer.initialization_status.read().check_action(action) + } Err(_) => None, } }), ); for action in bind_group.used_texture_ranges.iter() { info.pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), + texture_memory_actions.register_init_action(action, &texture_guard), ); } @@ -1362,12 +1367,16 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw; + let pipeline_layout = pipeline_layout_guard + [pipeline_layout_id.unwrap()] + .raw + .as_ref() + .unwrap(); for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] + .raw + .as_ref() + .unwrap(); unsafe { raw.set_bind_group( pipeline_layout, @@ -1383,8 +1392,7 @@ impl Global { let scope = PassErrorScope::SetPipelineRender(pipeline_id); state.pipeline = Some(pipeline_id); - let pipeline: &pipeline::RenderPipeline = cmd_buf - .trackers + let pipeline: &pipeline::RenderPipeline = tracker .render_pipelines .add_single(&*render_pipeline_guard, pipeline_id) .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) @@ -1414,7 +1422,7 @@ impl Global { .require(pipeline.flags.contains(PipelineFlags::BLEND_CONSTANT)); unsafe { - raw.set_render_pipeline(&pipeline.raw); + raw.set_render_pipeline(pipeline.raw.as_ref().unwrap()); } if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) { @@ -1424,22 +1432,23 @@ impl Global { } // Rebind resource - if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; + if state.binder.pipeline_layout_id != Some(pipeline.layout_id) { + let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; let (start_index, entries) = state.binder.change_pipeline_layout( &*pipeline_layout_guard, - pipeline.layout_id.value, + pipeline.layout_id, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] + .raw + .as_ref() + .unwrap(); unsafe { raw.set_bind_group( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -1460,7 +1469,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw.as_ref().unwrap(), range.stages, clear_offset, clear_data, @@ -1522,8 +1531,8 @@ impl Global { state.index.format = Some(index_format); 
state.index.update_limit(); - cmd_buf.buffer_memory_init_actions.extend( - buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + buffer.initialization_status.read().create_action( buffer_id, offset..end, MemoryInitKind::NeedsInitializedMemory, @@ -1531,7 +1540,7 @@ impl Global { ); let bb = hal::BufferBinding { - buffer: buf_raw, + buffer: buf_raw.as_ref(), offset, size, }; @@ -1573,8 +1582,8 @@ impl Global { }; vertex_state.bound = true; - cmd_buf.buffer_memory_init_actions.extend( - buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + buffer.initialization_status.read().create_action( buffer_id, offset..(offset + vertex_state.total_size), MemoryInitKind::NeedsInitializedMemory, @@ -1582,7 +1591,7 @@ impl Global { ); let bb = hal::BufferBinding { - buffer: buf_raw, + buffer: buf_raw.as_ref(), offset, size, }; @@ -1672,7 +1681,12 @@ impl Global { .map_pass_err(scope)?; unsafe { - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + raw.set_push_constants( + pipeline_layout.raw.as_ref().unwrap(), + stages, + offset, + data_slice, + ) } } RenderCommand::SetScissor(ref rect) => { @@ -1834,8 +1848,8 @@ impl Global { .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( buffer_id, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, @@ -1918,8 +1932,8 @@ impl Global { }) .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( buffer_id, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, @@ -1936,8 +1950,8 @@ impl Global { }) .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - count_buffer.initialization_status.create_action( + buffer_memory_init_actions.extend( + count_buffer.initialization_status.read().create_action( count_buffer_id, count_buffer_offset..end_count_offset, MemoryInitKind::NeedsInitializedMemory, @@ -2005,8 +2019,7 @@ impl Global { .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES) .map_pass_err(scope)?; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2027,8 +2040,7 @@ impl Global { } => { let scope = PassErrorScope::BeginPipelineStatisticsQuery; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2052,8 +2064,7 @@ impl Global { } RenderCommand::ExecuteBundle(bundle_id) => { let scope = PassErrorScope::ExecuteBundle; - let bundle: &command::RenderBundle = cmd_buf - .trackers + let bundle: &command::RenderBundle = tracker .bundles .add_single(&*bundle_guard, bundle_id) .ok_or(RenderCommandError::InvalidRenderBundle(bundle_id)) @@ -2081,20 +2092,20 @@ impl Global { .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( + buffer_memory_init_actions.extend( bundle .buffer_memory_init_actions .iter() .filter_map(|action| match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), + Ok(buffer) => { + 
buffer.initialization_status.read().check_action(action) + } Err(_) => None, }), ); for action in bundle.texture_memory_init_actions.iter() { info.pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), + texture_memory_actions.register_init_action(action, &texture_guard), ); } @@ -2121,8 +2132,7 @@ impl Global { info.usage_scope .merge_render_bundle(&*texture_guard, &bundle.used) .map_pass_err(scope)?; - cmd_buf - .trackers + tracker .add_from_render_bundle(&bundle.used) .map_pass_err(scope)?; }; @@ -2135,39 +2145,42 @@ impl Global { let (trackers, pending_discard_init_fixups) = info.finish(raw, &*texture_guard).map_pass_err(init_scope)?; - cmd_buf.encoder.close(); + encoder.close(); (trackers, query_reset_state, pending_discard_init_fixups) }; - let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let cmb_guard = hub.command_buffers.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); - let cmd_buf = cmb_guard.get_mut(encoder_id).unwrap(); + let cmd_buf = cmb_guard.get(encoder_id).unwrap(); + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, status, tracker, _, _) = cmd_buf_data.raw_mut(); { - let transit = cmd_buf.encoder.open(); + let transit = encoder.open(); fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, &texture_guard, - &mut cmd_buf.trackers.textures, - &device_guard[cmd_buf.device_id.value], + &mut tracker.textures, + &cmd_buf.device, ); query_reset_state .reset_queries( transit, &query_set_guard, - cmd_buf.device_id.value.0.backend(), + cmd_buf.device.info.id().0.backend(), ) .map_err(RenderCommandError::InvalidQuerySet) .map_pass_err(PassErrorScope::QueryReset)?; super::CommandBuffer::insert_barriers_from_scope( transit, - &mut cmd_buf.trackers, + tracker, &scope, &*buffer_guard, &*texture_guard, @@ -2179,10 +2192,10 @@ impl Global { //Note: we could just hold onto this raw pass while recording the // auxiliary encoder, but then handling errors and cleaning up // would be more complicated, so we re-use `open()`/`close()`. 
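
// Aside: a toy model (not wgpu-core's real `BufferInitTracker`, and not part of
// the patch itself) of why `initialization_status` moved behind an `RwLock`
// above: with resources shared via `Arc`, recording code only needs a read lock
// to derive "this range still needs init" actions, and a write lock is taken
// only when a range is actually marked initialized. std's RwLock is used here
// instead of parking_lot.
use std::ops::Range;
use std::sync::RwLock;

struct ToyInitTracker {
    initialized_up_to: u64,
}

impl ToyInitTracker {
    // Return the sub-range that still needs zeroing, if any.
    fn create_action(&self, range: Range<u64>) -> Option<Range<u64>> {
        if range.end > self.initialized_up_to {
            Some(range.start.max(self.initialized_up_to)..range.end)
        } else {
            None
        }
    }

    fn mark_initialized(&mut self, up_to: u64) {
        self.initialized_up_to = self.initialized_up_to.max(up_to);
    }
}

fn main() {
    let status = RwLock::new(ToyInitTracker { initialized_up_to: 0 });

    // Recording a copy: a read lock is enough to compute the pending action.
    let action = status.read().unwrap().create_action(0..256);
    assert_eq!(action, Some(0..256));

    // When the zeroing is actually submitted, commit it under a write lock.
    status.write().unwrap().mark_initialized(256);
    assert_eq!(status.read().unwrap().create_action(0..256), None);
}
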
- let pass_raw = cmd_buf.encoder.list.pop().unwrap(); - cmd_buf.encoder.close(); - cmd_buf.encoder.list.push(pass_raw); - cmd_buf.status = CommandEncoderStatus::Recording; + let pass_raw = encoder.list.pop().unwrap(); + encoder.close(); + encoder.list.push(pass_raw); + *status = CommandEncoderStatus::Recording; Ok(()) } diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index c0df8c9422..9e584d3510 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -5,14 +5,17 @@ use crate::{ conv, device::{Device, MissingDownlevelFlags}, error::{ErrorFormatter, PrettyError}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token}, + global::Global, + hal_api::HalApi, id::{BufferId, CommandEncoderId, TextureId, Valid}, + identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, TextureInitTrackerAction, }, resource::{Texture, TextureErrorDimension}, - track::TextureSelector, + storage::Storage, + track::{TextureSelector, Tracker}, }; use arrayvec::ArrayVec; @@ -22,6 +25,8 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages}; use std::iter; +use super::{memory_init::CommandBufferTextureMemoryActions, CommandEncoder}; + pub type ImageCopyBuffer = wgt::ImageCopyBuffer; pub type ImageCopyTexture = wgt::ImageCopyTexture; pub type ImageCopyTextureTagged = wgt::ImageCopyTextureTagged; @@ -177,7 +182,7 @@ pub enum CopyError { Transfer(#[from] TransferError), } -pub(crate) fn extract_texture_selector( +pub(crate) fn extract_texture_selector( copy_texture: &ImageCopyTexture, copy_size: &Extent3d, texture: &Texture, @@ -431,7 +436,9 @@ pub(crate) fn validate_texture_copy_range( fn handle_texture_init( init_kind: MemoryInitKind, - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, @@ -448,13 +455,12 @@ fn handle_texture_init( }; // Register the init action. - let immediate_inits = cmd_buf - .texture_memory_actions - .register_init_action(&{ init_action }, texture_guard); + let immediate_inits = + texture_memory_actions.register_init_action(&{ init_action }, texture_guard); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); for init in immediate_inits { clear_texture( texture_guard, @@ -464,9 +470,9 @@ fn handle_texture_init( layer_range: init.layer..(init.layer + 1), }, cmd_buf_raw, - &mut cmd_buf.trackers.textures, + &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -478,7 +484,9 @@ fn handle_texture_init( /// Ensure the source texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. 
fn handle_src_texture_init( - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -490,7 +498,9 @@ fn handle_src_texture_init( handle_texture_init( MemoryInitKind::NeedsInitializedMemory, - cmd_buf, + encoder, + trackers, + texture_memory_actions, device, source, copy_size, @@ -504,7 +514,9 @@ fn handle_src_texture_init( /// Ensure the destination texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_dst_texture_init( - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -530,7 +542,9 @@ fn handle_dst_texture_init( handle_texture_init( dst_init_kind, - cmd_buf, + encoder, + trackers, + texture_memory_actions, device, destination, copy_size, @@ -555,17 +569,17 @@ impl Global { return Err(TransferError::SameSourceDestinationBuffer.into()); } let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, _) = hub.buffers.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let buffer_guard = hub.buffers.read(); - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToBuffer { src: source, src_offset: source_offset, @@ -575,7 +589,7 @@ impl Global { }); } - let (src_buffer, src_pending) = cmd_buf + let (src_buffer, src_pending) = cmd_buf_data .trackers .buffers .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC) @@ -590,7 +604,7 @@ impl Global { // expecting only a single barrier let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); - let (dst_buffer, dst_pending) = cmd_buf + let (dst_buffer, dst_pending) = cmd_buf_data .trackers .buffers .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) @@ -661,27 +675,27 @@ impl Global { } // Make sure source is initialized memory and mark dest as initialized. 
- cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( destination, destination_offset..(destination_offset + size), MemoryInitKind::ImplicitlyInitialized, - )); - cmd_buf - .buffer_memory_init_actions - .extend(src_buffer.initialization_status.create_action( + ), + ); + cmd_buf_data.buffer_memory_init_actions.extend( + src_buffer.initialization_status.read().create_action( source, source_offset..(source_offset + size), MemoryInitKind::NeedsInitializedMemory, - )); + ), + ); let region = hal::BufferCopy { src_offset: source_offset, dst_offset: destination_offset, size: wgt::BufferSize::new(size).unwrap(), }; - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_buffers(src_barrier.into_iter().chain(dst_barrier)); cmd_buf_raw.copy_buffer_to_buffer(src_raw, dst_raw, iter::once(region)); @@ -699,18 +713,20 @@ impl Global { profiling::scope!("CommandEncoder::copy_buffer_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToTexture { src: *source, dst: *destination, @@ -734,15 +750,22 @@ impl Global { copy_size, )?; - let (dst_range, dst_base) = extract_texture_selector(destination, copy_size, dst_texture)?; + let (dst_range, dst_base) = extract_texture_selector(destination, copy_size, &dst_texture)?; // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
- handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; + handle_dst_texture_init( + encoder, + tracker, + texture_memory_actions, + &device, + destination, + copy_size, + &texture_guard, + )?; - let (src_buffer, src_pending) = cmd_buf - .trackers + let (src_buffer, src_pending) = tracker .buffers .set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC) .ok_or(TransferError::InvalidBuffer(source.buffer))?; @@ -755,11 +778,10 @@ impl Global { } let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); - let dst_pending = cmd_buf - .trackers + let dst_pending = tracker .textures .set_single( - dst_texture, + &dst_texture, destination.texture, dst_range, hal::TextureUses::COPY_DST, @@ -767,6 +789,8 @@ impl Global { .ok_or(TransferError::InvalidTexture(destination.texture))?; let dst_raw = dst_texture .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) { @@ -774,7 +798,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_texture)); if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -804,13 +828,11 @@ impl Global { .map_err(TransferError::from)?; } - cmd_buf - .buffer_memory_init_actions - .extend(src_buffer.initialization_status.create_action( - source.buffer, - source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::NeedsInitializedMemory, - )); + buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( + source.buffer, + source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::NeedsInitializedMemory, + )); let regions = (0..array_layer_count).map(|rel_array_layer| { let mut texture_base = dst_base.clone(); @@ -824,7 +846,7 @@ impl Global { } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); unsafe { cmd_buf_raw.transition_textures(dst_barrier.into_iter()); cmd_buf_raw.transition_buffers(src_barrier.into_iter()); @@ -843,18 +865,21 @@ impl Global { profiling::scope!("CommandEncoder::copy_texture_to_buffer"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToBuffer { src: *source, dst: *destination, @@ -874,18 +899,25 @@ impl Global { let 
(hal_copy_size, array_layer_count) = validate_texture_copy_range(source, &src_texture.desc, CopySide::Source, copy_size)?; - let (src_range, src_base) = extract_texture_selector(source, copy_size, src_texture)?; + let (src_range, src_base) = extract_texture_selector(source, copy_size, &src_texture)?; // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. - handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; + handle_src_texture_init( + encoder, + tracker, + texture_memory_actions, + &device, + source, + copy_size, + &texture_guard, + )?; - let src_pending = cmd_buf - .trackers + let src_pending = tracker .textures .set_single( - src_texture, + &src_texture, source.texture, src_range, hal::TextureUses::COPY_SRC, @@ -893,6 +925,8 @@ impl Global { .ok_or(TransferError::InvalidTexture(source.texture))?; let src_raw = src_texture .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) { @@ -911,10 +945,9 @@ impl Global { } .into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture)); + let src_barrier = src_pending.map(|pending| pending.into_hal(&src_texture)); - let (dst_buffer, dst_pending) = cmd_buf - .trackers + let (dst_buffer, dst_pending) = tracker .buffers .set_single( &*buffer_guard, @@ -961,14 +994,11 @@ impl Global { .map_err(TransferError::from)?; } - cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( - destination.buffer, - destination.layout.offset - ..(destination.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::ImplicitlyInitialized, - )); + buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( + destination.buffer, + destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::ImplicitlyInitialized, + )); let regions = (0..array_layer_count).map(|rel_array_layer| { let mut texture_base = src_base.clone(); @@ -981,7 +1011,7 @@ impl Global { size: hal_copy_size, } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); unsafe { cmd_buf_raw.transition_buffers(dst_barrier.into_iter()); cmd_buf_raw.transition_textures(src_barrier.into_iter()); @@ -1005,18 +1035,18 @@ impl Global { profiling::scope!("CommandEncoder::copy_texture_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (texture_guard, _) = hub.textures.read(&mut token); + let cmd_buf_guard = hub.command_buffers.read(); + let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let (encoder, _, tracker, _, texture_memory_actions) = cmd_buf_data.raw_mut(); + let texture_guard = hub.textures.read(); - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { 
list.push(TraceCommand::CopyTextureToTexture { src: *source, dst: *destination, @@ -1057,9 +1087,9 @@ impl Global { copy_size, )?; - let (src_range, src_tex_base) = extract_texture_selector(source, copy_size, src_texture)?; + let (src_range, src_tex_base) = extract_texture_selector(source, copy_size, &src_texture)?; let (dst_range, dst_tex_base) = - extract_texture_selector(destination, copy_size, dst_texture)?; + extract_texture_selector(destination, copy_size, &dst_texture)?; let src_texture_aspects = hal::FormatAspects::from(src_texture.desc.format); let dst_texture_aspects = hal::FormatAspects::from(dst_texture.desc.format); if src_tex_base.aspect != src_texture_aspects { @@ -1072,14 +1102,30 @@ impl Global { // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. - handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; - handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; + handle_src_texture_init( + encoder, + tracker, + texture_memory_actions, + &device, + source, + copy_size, + &texture_guard, + )?; + handle_dst_texture_init( + encoder, + tracker, + texture_memory_actions, + &device, + destination, + copy_size, + &texture_guard, + )?; - let src_pending = cmd_buf + let src_pending = cmd_buf_data .trackers .textures .set_single( - src_texture, + &src_texture, source.texture, src_range, hal::TextureUses::COPY_SRC, @@ -1087,6 +1133,8 @@ impl Global { .ok_or(TransferError::InvalidTexture(source.texture))?; let src_raw = src_texture .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) { @@ -1096,14 +1144,14 @@ impl Global { //TODO: try to avoid this the collection. It's needed because both // `src_pending` and `dst_pending` try to hold `trackers.textures` mutably. 
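
// Aside: a toy borrow-checker illustration (not wgpu-core's tracker types, and
// not part of the patch itself) of why the barriers get collected into a small
// vector in the code that follows: each `set_single` call borrows the tracker
// mutably, so the first call's pending transitions are materialized before the
// second call is made.
struct Tracker {
    log: Vec<String>,
}

impl Tracker {
    // Needs `&mut self` and yields the pending transitions for one resource.
    fn set_single(&mut self, name: &str) -> impl Iterator<Item = String> + '_ {
        self.log.push(name.to_owned());
        std::iter::once(format!("transition {name}"))
    }
}

fn main() {
    let mut tracker = Tracker { log: Vec::new() };
    // Collect the first borrow's output before starting the second call;
    // keeping both iterators alive would be two overlapping `&mut` borrows.
    let mut barriers: Vec<String> = tracker.set_single("src").collect();
    barriers.extend(tracker.set_single("dst"));
    assert_eq!(barriers.len(), 2);
}
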
let mut barriers: ArrayVec<_, 2> = src_pending - .map(|pending| pending.into_hal(src_texture)) + .map(|pending| pending.into_hal(&src_texture)) .collect(); - let dst_pending = cmd_buf + let dst_pending = cmd_buf_data .trackers .textures .set_single( - dst_texture, + &dst_texture, destination.texture, dst_range, hal::TextureUses::COPY_DST, @@ -1111,6 +1159,8 @@ impl Global { .ok_or(TransferError::InvalidTexture(destination.texture))?; let dst_raw = dst_texture .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) { @@ -1119,7 +1169,7 @@ impl Global { ); } - barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture))); + barriers.extend(dst_pending.map(|pending| pending.into_hal(&dst_texture))); let hal_copy_size = hal::CopyExtent { width: src_copy_size.width.min(dst_copy_size.width), @@ -1137,7 +1187,7 @@ impl Global { size: hal_copy_size, } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_textures(barriers.into_iter()); cmd_buf_raw.copy_texture_to_texture( diff --git a/wgpu-core/src/device/device.rs b/wgpu-core/src/device/device.rs new file mode 100644 index 0000000000..1b35f3a219 --- /dev/null +++ b/wgpu-core/src/device/device.rs @@ -0,0 +1,3097 @@ +use crate::{ + binding_model, command, conv, + device::life::{LifetimeTracker, WaitIdleError}, + device::queue::PendingWrites, + device::{ + AttachmentData, CommandAllocator, MissingDownlevelFlags, MissingFeatures, + RenderPassContext, CLEANUP_WAIT_MS, + }, + hal_api::HalApi, + hub::Hub, + id::{self, AdapterId, DeviceId}, + identity::GlobalIdentityHandlerFactory, + init_tracker::{ + BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, + TextureInitTracker, TextureInitTrackerAction, + }, + instance::Adapter, + pipeline, + resource::ResourceInfo, + resource::{ + self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView, + TextureViewNotRenderableReason, + }, + storage::Storage, + track::{BindGroupStates, TextureSelector, Tracker}, + validation::{self, check_buffer_usage, check_texture_usage}, + FastHashMap, LabelHelpers as _, SubmissionIndex, +}; + +use arrayvec::ArrayVec; +use hal::{CommandEncoder as _, Device as _}; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use smallvec::SmallVec; +use thiserror::Error; +use wgt::{TextureFormat, TextureViewDimension}; + +use std::{ + borrow::Cow, + iter, + num::NonZeroU32, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; + +use super::{ + life::{self, SuspectedResources}, + queue, DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE, + IMPLICIT_FAILURE, ZERO_BUFFER_SIZE, +}; + +/// Structure describing a logical device. Some members are internally mutable, +/// stored behind mutexes. +/// +/// TODO: establish clear order of locking for these: +/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`, +/// `render_passes`, `pending_writes`, `trace`. +/// +/// Currently, the rules are: +/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system +/// 1. `self.trackers` is locked last (unenforced) +/// 1. 
`self.trace` is locked last (unenforced) +pub struct Device { + pub(crate) raw: Option, + pub(crate) adapter_id: id::Valid, + pub(crate) queue: Option, + pub(crate) zero_buffer: Option, + //pub(crate) cmd_allocator: command::CommandAllocator, + //mem_allocator: Mutex>, + //desc_allocator: Mutex>, + //Note: The submission index here corresponds to the last submission that is done. + pub(crate) info: ResourceInfo, + + pub(crate) command_allocator: Mutex>>, + pub(crate) active_submission_index: AtomicU64, //SubmissionIndex, + pub(crate) fence: Mutex>, + + /// All live resources allocated with this [`Device`]. + /// + /// Has to be locked temporarily only (locked last) + pub(crate) trackers: Mutex>, + // Life tracker should be locked right after the device and before anything else. + life_tracker: Mutex>, + /// Temporary storage for resource management functions. Cleared at the end + /// of every call (unless an error occurs). + pub(crate) temp_suspected: Mutex>, + pub(crate) alignments: hal::Alignments, + pub(crate) limits: wgt::Limits, + pub(crate) features: wgt::Features, + pub(crate) downlevel: wgt::DownlevelCapabilities, + pub(crate) pending_writes: Mutex>>, + #[cfg(feature = "trace")] + pub(crate) trace: Option>, +} + +impl std::fmt::Debug for Device { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Device") + .field("adapter_id", &self.adapter_id) + .field("limits", &self.limits) + .field("features", &self.features) + .field("downlevel", &self.downlevel) + .finish() + } +} + +impl Drop for Device { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + let pending_writes = self.pending_writes.lock().take().unwrap(); + pending_writes.dispose(&raw); + self.command_allocator.lock().take().unwrap().dispose(&raw); + unsafe { + raw.destroy_buffer(self.zero_buffer.take().unwrap()); + raw.destroy_fence(self.fence.lock().take().unwrap()); + raw.exit(self.queue.take().unwrap()); + } + } +} + +#[derive(Clone, Debug, Error)] +pub enum CreateDeviceError { + #[error("Not enough memory left")] + OutOfMemory, + #[error("Failed to create internal buffer for initializing textures")] + FailedToCreateZeroBuffer(#[from] DeviceError), +} + +impl Device { + pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> { + if self.features.contains(feature) { + Ok(()) + } else { + Err(MissingFeatures(feature)) + } + } + + pub(crate) fn require_downlevel_flags( + &self, + flags: wgt::DownlevelFlags, + ) -> Result<(), MissingDownlevelFlags> { + if self.downlevel.flags.contains(flags) { + Ok(()) + } else { + Err(MissingDownlevelFlags(flags)) + } + } +} + +impl Device { + pub(crate) fn new( + open: hal::OpenDevice, + adapter_id: id::Valid, + alignments: hal::Alignments, + downlevel: wgt::DownlevelCapabilities, + desc: &DeviceDescriptor, + trace_path: Option<&std::path::Path>, + ) -> Result { + #[cfg(not(feature = "trace"))] + if let Some(_) = trace_path { + log::error!("Feature 'trace' is not enabled"); + } + let fence = + unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; + + let mut com_alloc = CommandAllocator { + free_encoders: Vec::new(), + }; + let pending_encoder = com_alloc + .acquire_encoder(&open.device, &open.queue) + .map_err(|_| CreateDeviceError::OutOfMemory)?; + let mut pending_writes = queue::PendingWrites::::new(pending_encoder); + + // Create zeroed buffer used for texture clears. 
+ let zero_buffer = unsafe { + open.device + .create_buffer(&hal::BufferDescriptor { + label: Some("(wgpu internal) zero init buffer"), + size: ZERO_BUFFER_SIZE, + usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, + memory_flags: hal::MemoryFlags::empty(), + }) + .map_err(DeviceError::from)? + }; + pending_writes.activate(); + unsafe { + pending_writes + .command_encoder + .transition_buffers(iter::once(hal::BufferBarrier { + buffer: &zero_buffer, + usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, + })); + pending_writes + .command_encoder + .clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE); + pending_writes + .command_encoder + .transition_buffers(iter::once(hal::BufferBarrier { + buffer: &zero_buffer, + usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC, + })); + } + + Ok(Self { + raw: Some(open.device), + adapter_id, + queue: Some(open.queue), + zero_buffer: Some(zero_buffer), + info: ResourceInfo::new(""), + command_allocator: Mutex::new(Some(com_alloc)), + active_submission_index: AtomicU64::new(0), + fence: Mutex::new(Some(fence)), + trackers: Mutex::new(Tracker::new()), + life_tracker: Mutex::new(life::LifetimeTracker::new()), + temp_suspected: Mutex::new(life::SuspectedResources::new()), + #[cfg(feature = "trace")] + trace: trace_path.and_then(|path| match trace::Trace::new(path) { + Ok(mut trace) => { + trace.add(trace::Action::Init { + desc: desc.clone(), + backend: A::VARIANT, + }); + Some(Mutex::new(trace)) + } + Err(e) => { + log::error!("Unable to start a trace in '{:?}': {:?}", path, e); + None + } + }), + alignments, + limits: desc.limits.clone(), + features: desc.features, + downlevel, + pending_writes: Mutex::new(Some(pending_writes)), + }) + } + + pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> { + self.life_tracker.lock() + } + + /// Check this device for completed commands. + /// + /// The `maintain` argument tells how the maintence function should behave, either + /// blocking or just polling the current state of the gpu. + /// + /// Return a pair `(closures, queue_empty)`, where: + /// + /// - `closures` is a list of actions to take: mapping buffers, notifying the user + /// + /// - `queue_empty` is a boolean indicating whether there are more queue + /// submissions still in flight. (We have to take the locks needed to + /// produce this information for other reasons, so we might as well just + /// return it to our callers.) + pub(crate) fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( + &'this self, + hub: &Hub, + maintain: wgt::Maintain, + ) -> Result<(UserClosures, bool), WaitIdleError> { + profiling::scope!("Device::maintain"); + let mut life_tracker = self.lock_life(); + + // Normally, `temp_suspected` exists only to save heap + // allocations: it's cleared at the start of the function + // call, and cleared by the end. But `Global::queue_submit` is + // fallible; if it exits early, it may leave some resources in + // `temp_suspected`. + life_tracker + .suspected_resources + .extend(&self.temp_suspected.lock()); + self.temp_suspected.lock().clear(); + + life_tracker.triage_suspected( + hub, + &self.trackers, + #[cfg(feature = "trace")] + self.trace.as_ref(), + ); + life_tracker.triage_mapped(); + + let last_done_index = if maintain.is_wait() { + let index_to_wait_for = match maintain { + wgt::Maintain::WaitForSubmissionIndex(submission_index) => { + // We don't need to check to see if the queue id matches + // as we already checked this from inside the poll call. 
+ submission_index.index + } + _ => self.active_submission_index.load(Ordering::Relaxed), + }; + unsafe { + self.raw + .as_ref() + .unwrap() + .wait( + self.fence.lock().as_ref().unwrap(), + index_to_wait_for, + CLEANUP_WAIT_MS, + ) + .map_err(DeviceError::from)? + }; + index_to_wait_for + } else { + unsafe { + self.raw + .as_ref() + .unwrap() + .get_fence_value(self.fence.lock().as_ref().unwrap()) + .map_err(DeviceError::from)? + } + }; + + let submission_closures = life_tracker.triage_submissions( + last_done_index, + self.command_allocator.lock().as_mut().unwrap(), + ); + let mapping_closures = + life_tracker.handle_mapping(hub, self.raw.as_ref().unwrap(), &self.trackers); + life_tracker.cleanup(); + + let closures = UserClosures { + mappings: mapping_closures, + submissions: submission_closures, + }; + Ok((closures, life_tracker.queue_empty())) + } + + pub(crate) fn untrack(&self, trackers: &Tracker) { + self.temp_suspected.lock().clear(); + // As the tracker is cleared/dropped, we need to consider all the resources + // that it references for destruction in the next GC pass. + { + for resource in trackers.buffers.used_resources() { + if resource.is_unique() { + self.temp_suspected.lock().buffers.push(resource.clone()); + } + } + for resource in trackers.textures.used_resources() { + if resource.is_unique() { + self.temp_suspected.lock().textures.push(resource.clone()); + } + } + for resource in trackers.views.used_resources() { + if resource.is_unique() { + self.temp_suspected + .lock() + .texture_views + .push(resource.clone()); + } + } + for resource in trackers.bind_groups.used_resources() { + if resource.is_unique() { + self.temp_suspected + .lock() + .bind_groups + .push(resource.clone()); + } + } + for resource in trackers.samplers.used_resources() { + if resource.is_unique() { + self.temp_suspected.lock().samplers.push(resource.clone()); + } + } + for resource in trackers.compute_pipelines.used_resources() { + if resource.is_unique() { + self.temp_suspected + .lock() + .compute_pipelines + .push(resource.clone()); + } + } + for resource in trackers.render_pipelines.used_resources() { + if resource.is_unique() { + self.temp_suspected + .lock() + .render_pipelines + .push(resource.clone()); + } + } + for resource in trackers.query_sets.used_resources() { + if resource.is_unique() { + self.temp_suspected.lock().query_sets.push(resource.clone()); + } + } + } + + self.lock_life() + .suspected_resources + .extend(&self.temp_suspected.lock()); + + self.temp_suspected.lock().clear(); + } + + pub(crate) fn create_buffer( + self: &Arc, + self_id: DeviceId, + desc: &resource::BufferDescriptor, + transient: bool, + ) -> Result, resource::CreateBufferError> { + debug_assert_eq!(self_id.backend(), A::VARIANT); + + if desc.size > self.limits.max_buffer_size { + return Err(resource::CreateBufferError::MaxBufferSize { + requested: desc.size, + maximum: self.limits.max_buffer_size, + }); + } + + if desc.usage.contains(wgt::BufferUsages::INDEX) + && desc.usage.contains( + wgt::BufferUsages::VERTEX + | wgt::BufferUsages::UNIFORM + | wgt::BufferUsages::INDIRECT + | wgt::BufferUsages::STORAGE, + ) + { + self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?; + } + + let mut usage = conv::map_buffer_usage(desc.usage); + + if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { + return Err(resource::CreateBufferError::InvalidUsage(desc.usage)); + } + + if !self + .features + .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS) + { + use wgt::BufferUsages as Bu; + let 
write_mismatch = desc.usage.contains(Bu::MAP_WRITE) + && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage); + let read_mismatch = desc.usage.contains(Bu::MAP_READ) + && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage); + if write_mismatch || read_mismatch { + return Err(resource::CreateBufferError::UsageMismatch(desc.usage)); + } + } + + if desc.mapped_at_creation { + if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 { + return Err(resource::CreateBufferError::UnalignedSize); + } + if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + // we are going to be copying into it, internally + usage |= hal::BufferUses::COPY_DST; + } + } else { + // We are required to zero out (initialize) all memory. This is done + // on demand using clear_buffer which requires write transfer usage! + usage |= hal::BufferUses::COPY_DST; + } + + let actual_size = if desc.size == 0 { + wgt::COPY_BUFFER_ALIGNMENT + } else if desc.usage.contains(wgt::BufferUsages::VERTEX) { + // Bumping the size by 1 so that we can bind an empty range at the + // end of the buffer. + desc.size + 1 + } else { + desc.size + }; + let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT; + let aligned_size = if clear_remainder != 0 { + actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder + } else { + actual_size + }; + + let mut memory_flags = hal::MemoryFlags::empty(); + memory_flags.set(hal::MemoryFlags::TRANSIENT, transient); + + let hal_desc = hal::BufferDescriptor { + label: desc.label.borrow_option(), + size: aligned_size, + usage, + memory_flags, + }; + let buffer = unsafe { self.raw.as_ref().unwrap().create_buffer(&hal_desc) } + .map_err(DeviceError::from)?; + + Ok(Buffer { + raw: Some(Arc::new(buffer)), + device: self.clone(), + usage: desc.usage, + size: desc.size, + initialization_status: RwLock::new(BufferInitTracker::new(desc.size)), + sync_mapped_writes: Mutex::new(None), + map_state: Mutex::new(resource::BufferMapState::Idle), + info: ResourceInfo::new(desc.label.borrow_or_default()), + }) + } + + pub(crate) fn create_texture_from_hal( + self: &Arc, + hal_texture: A::Texture, + hal_usage: hal::TextureUses, + self_id: DeviceId, + desc: &resource::TextureDescriptor, + format_features: wgt::TextureFormatFeatures, + clear_mode: resource::TextureClearMode, + ) -> Texture { + debug_assert_eq!(self_id.backend(), A::VARIANT); + + Texture { + inner: Some(resource::TextureInner::Native { + raw: Some(Arc::new(hal_texture)), + }), + device: self.clone(), + desc: desc.map_label(|_| ()), + hal_usage, + format_features, + initialization_status: RwLock::new(TextureInitTracker::new( + desc.mip_level_count, + desc.array_layer_count(), + )), + full_range: TextureSelector { + mips: 0..desc.mip_level_count, + layers: 0..desc.array_layer_count(), + }, + info: ResourceInfo::new(desc.label.borrow_or_default()), + clear_mode: RwLock::new(clear_mode), + } + } + + pub(crate) fn create_texture( + self: &Arc, + self_id: DeviceId, + adapter: &Adapter, + desc: &resource::TextureDescriptor, + ) -> Result, resource::CreateTextureError> { + use resource::{CreateTextureError, TextureDimensionError}; + + if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { + return Err(CreateTextureError::InvalidUsage(desc.usage)); + } + + conv::check_texture_dimension_size( + desc.dimension, + desc.size, + desc.sample_count, + &self.limits, + )?; + + if desc.dimension != wgt::TextureDimension::D2 { + // Depth textures can only be 2D + if desc.format.is_depth_stencil_format() { + return Err(CreateTextureError::InvalidDepthDimension( + 
desc.dimension, + desc.format, + )); + } + // Renderable textures can only be 2D + if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { + return Err(CreateTextureError::InvalidDimensionUsages( + wgt::TextureUsages::RENDER_ATTACHMENT, + desc.dimension, + )); + } + + // Compressed textures can only be 2D + if desc.format.is_compressed() { + return Err(CreateTextureError::InvalidCompressedDimension( + desc.dimension, + desc.format, + )); + } + } + + if desc.format.is_compressed() { + let (block_width, block_height) = desc.format.block_dimensions(); + + if desc.size.width % block_width != 0 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::NotMultipleOfBlockWidth { + width: desc.size.width, + block_width, + format: desc.format, + }, + )); + } + + if desc.size.height % block_height != 0 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::NotMultipleOfBlockHeight { + height: desc.size.height, + block_height, + format: desc.format, + }, + )); + } + } + + let format_features = self + .describe_format_features(adapter, desc.format) + .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?; + + if desc.sample_count > 1 { + if desc.mip_level_count != 1 { + return Err(CreateTextureError::InvalidMipLevelCount { + requested: desc.mip_level_count, + maximum: 1, + }); + } + + if desc.size.depth_or_array_layers != 1 { + return Err(CreateTextureError::InvalidDimension( + TextureDimensionError::MultisampledDepthOrArrayLayer( + desc.size.depth_or_array_layers, + ), + )); + } + + if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) { + return Err(CreateTextureError::InvalidMultisampledStorageBinding); + } + + if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { + return Err(CreateTextureError::MultisampledNotRenderAttachment); + } + + if !format_features.flags.intersects( + wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16, + ) { + return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); + } + + if !format_features + .flags + .sample_count_supported(desc.sample_count) + { + return Err(CreateTextureError::InvalidSampleCount( + desc.sample_count, + desc.format, + )); + }; + } + + let mips = desc.mip_level_count; + let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS); + if mips == 0 || mips > max_levels_allowed { + return Err(CreateTextureError::InvalidMipLevelCount { + requested: mips, + maximum: max_levels_allowed, + }); + } + + let missing_allowed_usages = desc.usage - format_features.allowed_usages; + if !missing_allowed_usages.is_empty() { + // detect downlevel incompatibilities + let wgpu_allowed_usages = desc.format.guaranteed_format_features().allowed_usages; + let wgpu_missing_usages = desc.usage - wgpu_allowed_usages; + return Err(CreateTextureError::InvalidFormatUsages( + missing_allowed_usages, + desc.format, + wgpu_missing_usages.is_empty(), + )); + } + + let mut hal_view_formats = vec![]; + for format in desc.view_formats.iter() { + if desc.format == *format { + continue; + } + if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() { + return Err(CreateTextureError::InvalidViewFormat(*format, desc.format)); + } + hal_view_formats.push(*format); + } + if !hal_view_formats.is_empty() { + self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; + } + + // Enforce having 
COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we + // wouldn't be able to initialize the texture. + let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) + | if desc.format.is_depth_stencil_format() { + hal::TextureUses::DEPTH_STENCIL_WRITE + } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + hal::TextureUses::COPY_DST // (set already) + } else { + // Use COPY_DST only if we can't use COLOR_TARGET + if format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + && desc.dimension == wgt::TextureDimension::D2 + // Render targets dimension must be 2d + { + hal::TextureUses::COLOR_TARGET + } else { + hal::TextureUses::COPY_DST + } + }; + + let hal_desc = hal::TextureDescriptor { + label: desc.label.borrow_option(), + size: desc.size, + mip_level_count: desc.mip_level_count, + sample_count: desc.sample_count, + dimension: desc.dimension, + format: desc.format, + usage: hal_usage, + memory_flags: hal::MemoryFlags::empty(), + view_formats: hal_view_formats, + }; + + let raw_texture = unsafe { + self.raw + .as_ref() + .unwrap() + .create_texture(&hal_desc) + .map_err(DeviceError::from)? + }; + + let clear_mode = if hal_usage + .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) + { + let is_color = !desc.format.is_depth_stencil_format(); + let clear_views = SmallVec::new(); + resource::TextureClearMode::RenderPass { + clear_views, + is_color, + } + } else { + resource::TextureClearMode::BufferCopy + }; + + let mut texture = self.create_texture_from_hal( + raw_texture, + hal_usage, + self_id, + desc, + format_features, + clear_mode, + ); + texture.hal_usage = hal_usage; + Ok(texture) + } + + pub(crate) fn create_texture_inner_view( + self: &Arc, + texture: &A::Texture, + texture_id: id::TextureId, + texture_desc: &wgt::TextureDescriptor<(), Vec>, + texture_usage: &hal::TextureUses, + texture_format: &wgt::TextureFormatFeatures, + desc: &resource::TextureViewDescriptor, + ) -> Result, resource::CreateTextureViewError> { + // resolve TextureViewDescriptor defaults + // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults + + let resolved_format = desc.format.unwrap_or_else(|| { + texture_desc + .format + .aspect_specific_format(desc.range.aspect) + .unwrap_or(texture_desc.format) + }); + + let resolved_dimension = desc + .dimension + .unwrap_or_else(|| match texture_desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => { + if texture_desc.array_layer_count() == 1 { + wgt::TextureViewDimension::D2 + } else { + wgt::TextureViewDimension::D2Array + } + } + wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3, + }); + + let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| { + texture_desc + .mip_level_count + .saturating_sub(desc.range.base_mip_level) + }); + + let resolved_array_layer_count = + desc.range + .array_layer_count + .unwrap_or_else(|| match resolved_dimension { + wgt::TextureViewDimension::D1 + | wgt::TextureViewDimension::D2 + | wgt::TextureViewDimension::D3 => 1, + wgt::TextureViewDimension::Cube => 6, + wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => { + texture_desc + .array_layer_count() + .saturating_sub(desc.range.base_array_layer) + } + }); + + // validate TextureViewDescriptor + + let aspects = hal::FormatAspects::new(texture_desc.format, desc.range.aspect); + if aspects.is_empty() { + return 
Err(resource::CreateTextureViewError::InvalidAspect { + texture_format: texture_desc.format, + requested_aspect: desc.range.aspect, + }); + } + + let format_is_good = if desc.range.aspect == wgt::TextureAspect::All { + resolved_format == texture_desc.format + || texture_desc.view_formats.contains(&resolved_format) + } else { + Some(resolved_format) + == texture_desc + .format + .aspect_specific_format(desc.range.aspect) + }; + if !format_is_good { + return Err(resource::CreateTextureViewError::FormatReinterpretation { + texture: texture_desc.format, + view: resolved_format, + }); + } + + // check if multisampled texture is seen as anything but 2D + if texture_desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { + return Err( + resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension( + resolved_dimension, + ), + ); + } + + // check if the dimension is compatible with the texture + if texture_desc.dimension != resolved_dimension.compatible_texture_dimension() { + return Err( + resource::CreateTextureViewError::InvalidTextureViewDimension { + view: resolved_dimension, + texture: texture_desc.dimension, + }, + ); + } + + match resolved_dimension { + TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => { + if resolved_array_layer_count != 1 { + return Err(resource::CreateTextureViewError::InvalidArrayLayerCount { + requested: resolved_array_layer_count, + dim: resolved_dimension, + }); + } + } + TextureViewDimension::Cube => { + if resolved_array_layer_count != 6 { + return Err( + resource::CreateTextureViewError::InvalidCubemapTextureDepth { + depth: resolved_array_layer_count, + }, + ); + } + } + TextureViewDimension::CubeArray => { + if resolved_array_layer_count % 6 != 0 { + return Err( + resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth { + depth: resolved_array_layer_count, + }, + ); + } + } + _ => {} + } + + match resolved_dimension { + TextureViewDimension::Cube | TextureViewDimension::CubeArray => { + if texture_desc.size.width != texture_desc.size.height { + return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize); + } + } + _ => {} + } + + if resolved_mip_level_count == 0 { + return Err(resource::CreateTextureViewError::ZeroMipLevelCount); + } + + let mip_level_end = desc + .range + .base_mip_level + .saturating_add(resolved_mip_level_count); + + let level_end = texture_desc.mip_level_count; + if mip_level_end > level_end { + return Err(resource::CreateTextureViewError::TooManyMipLevels { + requested: mip_level_end, + total: level_end, + }); + } + + if resolved_array_layer_count == 0 { + return Err(resource::CreateTextureViewError::ZeroArrayLayerCount); + } + + let array_layer_end = desc + .range + .base_array_layer + .saturating_add(resolved_array_layer_count); + + let layer_end = texture_desc.array_layer_count(); + if array_layer_end > layer_end { + return Err(resource::CreateTextureViewError::TooManyArrayLayers { + requested: array_layer_end, + total: layer_end, + }); + }; + + // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view + let render_extent = 'b: loop { + if !texture_desc + .usage + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break 'b Err(TextureViewNotRenderableReason::Usage(texture_desc.usage)); + } + + if resolved_dimension != TextureViewDimension::D2 { + break 'b Err(TextureViewNotRenderableReason::Dimension( + resolved_dimension, + )); + } + + if resolved_mip_level_count != 1 { + break 'b 
Err(TextureViewNotRenderableReason::MipLevelCount( + resolved_mip_level_count, + )); + } + + if resolved_array_layer_count != 1 { + break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount( + resolved_array_layer_count, + )); + } + + if aspects != hal::FormatAspects::from(texture_desc.format) { + break 'b Err(TextureViewNotRenderableReason::Aspects(aspects)); + } + + break 'b Ok(texture_desc.compute_render_extent(desc.range.base_mip_level)); + }; + + // filter the usages based on the other criteria + let usage = { + let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST); + let mask_dimension = match resolved_dimension { + wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { + hal::TextureUses::RESOURCE + } + wgt::TextureViewDimension::D3 => { + hal::TextureUses::RESOURCE + | hal::TextureUses::STORAGE_READ + | hal::TextureUses::STORAGE_READ_WRITE + } + _ => hal::TextureUses::all(), + }; + let mask_mip_level = if resolved_mip_level_count == 1 { + hal::TextureUses::all() + } else { + hal::TextureUses::RESOURCE + }; + *texture_usage & mask_copy & mask_dimension & mask_mip_level + }; + + log::debug!( + "Create view for texture {:?} filters usages to {:?}", + texture_id, + usage + ); + + // use the combined depth-stencil format for the view + let format = if resolved_format.is_depth_stencil_component(texture_desc.format) { + texture_desc.format + } else { + resolved_format + }; + + let resolved_range = wgt::ImageSubresourceRange { + aspect: desc.range.aspect, + base_mip_level: desc.range.base_mip_level, + mip_level_count: Some(resolved_mip_level_count), + base_array_layer: desc.range.base_array_layer, + array_layer_count: Some(resolved_array_layer_count), + }; + + let hal_desc = hal::TextureViewDescriptor { + label: desc.label.borrow_option(), + format, + dimension: resolved_dimension, + usage, + range: resolved_range, + }; + + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_texture_view(texture, &hal_desc) + .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
+ }; + + let selector = TextureSelector { + mips: desc.range.base_mip_level..mip_level_end, + layers: desc.range.base_array_layer..array_layer_end, + }; + + Ok(TextureView { + raw: Some(Arc::new(raw)), + parent_id: id::Valid(texture_id), + device: self.clone(), + desc: resource::HalTextureViewDescriptor { + format: resolved_format, + dimension: resolved_dimension, + range: resolved_range, + }, + format_features: *texture_format, + render_extent, + samples: texture_desc.sample_count, + selector, + info: ResourceInfo::new(desc.label.borrow_or_default()), + }) + } + + pub(crate) fn create_texture_view( + self: &Arc, + texture: &Texture, + texture_id: id::TextureId, + desc: &resource::TextureViewDescriptor, + ) -> Result, resource::CreateTextureViewError> { + let texture_raw = texture + .inner + .as_ref() + .unwrap() + .as_raw() + .ok_or(resource::CreateTextureViewError::InvalidTexture)?; + + self.create_texture_inner_view( + texture_raw, + texture_id, + &texture.desc, + &texture.hal_usage, + &texture.format_features, + desc, + ) + } + + pub(crate) fn create_sampler( + self: &Arc, + desc: &resource::SamplerDescriptor, + ) -> Result, resource::CreateSamplerError> { + if desc + .address_modes + .iter() + .any(|am| am == &wgt::AddressMode::ClampToBorder) + { + self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?; + } + + if desc.border_color == Some(wgt::SamplerBorderColor::Zero) { + self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?; + } + + if desc.lod_min_clamp < 0.0 { + return Err(resource::CreateSamplerError::InvalidLodMinClamp( + desc.lod_min_clamp, + )); + } + if desc.lod_max_clamp < desc.lod_min_clamp { + return Err(resource::CreateSamplerError::InvalidLodMaxClamp { + lod_min_clamp: desc.lod_min_clamp, + lod_max_clamp: desc.lod_max_clamp, + }); + } + + if desc.anisotropy_clamp < 1 { + return Err(resource::CreateSamplerError::InvalidAnisotropy( + desc.anisotropy_clamp, + )); + } + + if desc.anisotropy_clamp != 1 { + if !matches!(desc.min_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MinFilter, + filter_mode: desc.min_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + if !matches!(desc.mag_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MagFilter, + filter_mode: desc.mag_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) { + return Err( + resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { + filter_type: resource::SamplerFilterErrorType::MipmapFilter, + filter_mode: desc.mipmap_filter, + anisotropic_clamp: desc.anisotropy_clamp, + }, + ); + } + } + + let anisotropy_clamp = if self + .downlevel + .flags + .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING) + { + // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface + desc.anisotropy_clamp.min(16) + } else { + // If it isn't supported, set this unconditionally to 1 + 1 + }; + + //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS + + let hal_desc = hal::SamplerDescriptor { + label: desc.label.borrow_option(), + address_modes: desc.address_modes, + mag_filter: desc.mag_filter, + min_filter: desc.min_filter, + mipmap_filter: desc.mipmap_filter, + lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp, + compare: desc.compare, + anisotropy_clamp, + border_color: 
desc.border_color, + }; + + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_sampler(&hal_desc) + .map_err(DeviceError::from)? + }; + Ok(Sampler { + raw: Some(Arc::new(raw)), + device: self.clone(), + info: ResourceInfo::new(desc.label.borrow_or_default()), + comparison: desc.compare.is_some(), + filtering: desc.min_filter == wgt::FilterMode::Linear + || desc.mag_filter == wgt::FilterMode::Linear, + }) + } + + pub(crate) fn create_shader_module<'a>( + self: &Arc, + desc: &pipeline::ShaderModuleDescriptor<'a>, + source: pipeline::ShaderModuleSource<'a>, + ) -> Result, pipeline::CreateShaderModuleError> { + let (module, source) = match source { + #[cfg(feature = "wgsl")] + pipeline::ShaderModuleSource::Wgsl(code) => { + profiling::scope!("naga::wgsl::parse_str"); + let module = naga::front::wgsl::parse_str(&code).map_err(|inner| { + pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError { + source: code.to_string(), + label: desc.label.as_ref().map(|l| l.to_string()), + inner: Box::new(inner), + }) + })?; + (Cow::Owned(module), code.into_owned()) + } + pipeline::ShaderModuleSource::Naga(module) => (module, String::new()), + pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"), + }; + for (_, var) in module.global_variables.iter() { + match var.binding { + Some(ref br) if br.group >= self.limits.max_bind_groups => { + return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex { + bind: br.clone(), + group: br.group, + limit: self.limits.max_bind_groups, + }); + } + _ => continue, + }; + } + + use naga::valid::Capabilities as Caps; + profiling::scope!("naga::validate"); + + let mut caps = Caps::empty(); + caps.set( + Caps::PUSH_CONSTANT, + self.features.contains(wgt::Features::PUSH_CONSTANTS), + ); + caps.set( + Caps::FLOAT64, + self.features.contains(wgt::Features::SHADER_F64), + ); + caps.set( + Caps::PRIMITIVE_INDEX, + self.features + .contains(wgt::Features::SHADER_PRIMITIVE_INDEX), + ); + caps.set( + Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + caps.set( + Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + // TODO: This needs a proper wgpu feature + caps.set( + Caps::SAMPLER_NON_UNIFORM_INDEXING, + self.features.contains( + wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, + ), + ); + caps.set( + Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS, + self.features + .contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM), + ); + caps.set( + Caps::MULTIVIEW, + self.features.contains(wgt::Features::MULTIVIEW), + ); + caps.set( + Caps::EARLY_DEPTH_TEST, + self.features + .contains(wgt::Features::SHADER_EARLY_DEPTH_TEST), + ); + caps.set( + Caps::MULTISAMPLED_SHADING, + self.downlevel + .flags + .contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING), + ); + + let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps) + .validate(&module) + .map_err(|inner| { + pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError { + source, + label: desc.label.as_ref().map(|l| l.to_string()), + inner: Box::new(inner), + }) + })?; + let interface = + validation::Interface::new(&module, &info, self.features, self.limits.clone()); + let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info }); + + let hal_desc = 
hal::ShaderModuleDescriptor { + label: desc.label.borrow_option(), + runtime_checks: desc.shader_bound_checks.runtime_checks(), + }; + let raw = match unsafe { + self.raw + .as_ref() + .unwrap() + .create_shader_module(&hal_desc, hal_shader) + } { + Ok(raw) => raw, + Err(error) => { + return Err(match error { + hal::ShaderError::Device(error) => { + pipeline::CreateShaderModuleError::Device(error.into()) + } + hal::ShaderError::Compilation(ref msg) => { + log::error!("Shader error: {}", msg); + pipeline::CreateShaderModuleError::Generation + } + }) + } + }; + + Ok(pipeline::ShaderModule { + raw: Some(Arc::new(raw)), + device: self.clone(), + interface: Some(interface), + info: ResourceInfo::new(desc.label.borrow_or_default()), + #[cfg(debug_assertions)] + label: desc.label.borrow_or_default().to_string(), + }) + } + + #[allow(unused_unsafe)] + pub(crate) unsafe fn create_shader_module_spirv<'a>( + self: &Arc, + desc: &pipeline::ShaderModuleDescriptor<'a>, + source: &'a [u32], + ) -> Result, pipeline::CreateShaderModuleError> { + self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?; + let hal_desc = hal::ShaderModuleDescriptor { + label: desc.label.borrow_option(), + runtime_checks: desc.shader_bound_checks.runtime_checks(), + }; + let hal_shader = hal::ShaderInput::SpirV(source); + let raw = match unsafe { + self.raw + .as_ref() + .unwrap() + .create_shader_module(&hal_desc, hal_shader) + } { + Ok(raw) => raw, + Err(error) => { + return Err(match error { + hal::ShaderError::Device(error) => { + pipeline::CreateShaderModuleError::Device(error.into()) + } + hal::ShaderError::Compilation(ref msg) => { + log::error!("Shader error: {}", msg); + pipeline::CreateShaderModuleError::Generation + } + }) + } + }; + + Ok(pipeline::ShaderModule { + raw: Some(Arc::new(raw)), + device: self.clone(), + interface: None, + info: ResourceInfo::new(desc.label.borrow_or_default()), + #[cfg(debug_assertions)] + label: desc.label.borrow_or_default().to_string(), + }) + } + + pub(crate) fn deduplicate_bind_group_layout( + self_id: DeviceId, + entry_map: &binding_model::BindEntryMap, + guard: &Storage, id::BindGroupLayoutId>, + ) -> Option { + guard + .iter(self_id.backend()) + .find(|&(_, bgl)| bgl.device.info.id().0 == self_id && bgl.entries == *entry_map) + .map(|(id, _)| id) + } + + pub(crate) fn get_introspection_bind_group_layouts<'a>( + pipeline_layout: &binding_model::PipelineLayout, + bgl_guard: &'a Storage, id::BindGroupLayoutId>, + ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { + pipeline_layout + .bind_group_layout_ids + .iter() + .map(|&id| &bgl_guard[id].entries) + .collect() + } + + /// Generate information about late-validated buffer bindings for pipelines. + //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? + pub(crate) fn make_late_sized_buffer_groups<'a>( + shader_binding_sizes: &FastHashMap, + layout: &binding_model::PipelineLayout, + bgl_guard: &'a Storage, id::BindGroupLayoutId>, + ) -> ArrayVec { + // Given the shader-required binding sizes and the pipeline layout, + // return the filtered list of them in the layout order, + // removing those with given `min_binding_size`. + layout + .bind_group_layout_ids + .iter() + .enumerate() + .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { + shader_sizes: bgl_guard[bgl_id] + .entries + .values() + .filter_map(|entry| match entry.ty { + wgt::BindingType::Buffer { + min_binding_size: None, + .. 
+ } => { + let rb = naga::ResourceBinding { + group: group_index as u32, + binding: entry.binding, + }; + let shader_size = + shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get()); + Some(shader_size) + } + _ => None, + }) + .collect(), + }) + .collect() + } + + pub(crate) fn create_bind_group_layout( + self: &Arc, + label: Option<&str>, + entry_map: binding_model::BindEntryMap, + ) -> Result, binding_model::CreateBindGroupLayoutError> { + #[derive(PartialEq)] + enum WritableStorage { + Yes, + No, + } + + for entry in entry_map.values() { + use wgt::BindingType as Bt; + + let mut required_features = wgt::Features::empty(); + let mut required_downlevel_flags = wgt::DownlevelFlags::empty(); + let (array_feature, writable_storage) = match entry.ty { + Bt::Buffer { + ty: wgt::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: _, + } => ( + Some(wgt::Features::BUFFER_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Buffer { + ty: wgt::BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: _, + } => ( + Some(wgt::Features::BUFFER_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Buffer { + ty: wgt::BufferBindingType::Storage { read_only }, + .. + } => ( + Some( + wgt::Features::BUFFER_BINDING_ARRAY + | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, + ), + match read_only { + true => WritableStorage::No, + false => WritableStorage::Yes, + }, + ), + Bt::Sampler { .. } => ( + Some(wgt::Features::TEXTURE_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::Texture { .. } => ( + Some(wgt::Features::TEXTURE_BINDING_ARRAY), + WritableStorage::No, + ), + Bt::StorageTexture { + access, + view_dimension, + format: _, + } => { + match view_dimension { + wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, + }) + } + _ => (), + } + match access { + wgt::StorageTextureAccess::ReadOnly + | wgt::StorageTextureAccess::ReadWrite + if !self.features.contains( + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, + ) => + { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, + }); + } + _ => (), + } + ( + Some( + wgt::Features::TEXTURE_BINDING_ARRAY + | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, + ), + match access { + wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes, + wgt::StorageTextureAccess::ReadOnly => { + required_features |= + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; + WritableStorage::No + } + wgt::StorageTextureAccess::ReadWrite => { + required_features |= + wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; + WritableStorage::Yes + } + }, + ) + } + }; + + // Validate the count parameter + if entry.count.is_some() { + required_features |= array_feature + .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + } + + if entry.visibility.contains_invalid_bits() { + return Err( + binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility), + ); + } + + if entry.visibility.contains(wgt::ShaderStages::VERTEX) { + if writable_storage == WritableStorage::Yes { + required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE; + } + if let Bt::Buffer { + ty: wgt::BufferBindingType::Storage { .. 
}, + .. + } = entry.ty + { + required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE; + } + } + if writable_storage == WritableStorage::Yes + && entry.visibility.contains(wgt::ShaderStages::FRAGMENT) + { + required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE; + } + + self.require_features(required_features) + .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + self.require_downlevel_flags(required_downlevel_flags) + .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) + .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error, + })?; + } + + let bgl_flags = conv::bind_group_layout_flags(self.features); + + let mut hal_bindings = entry_map.values().cloned().collect::>(); + hal_bindings.sort_by_key(|b| b.binding); + let hal_desc = hal::BindGroupLayoutDescriptor { + label, + flags: bgl_flags, + entries: &hal_bindings, + }; + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_bind_group_layout(&hal_desc) + .map_err(DeviceError::from)? + }; + + let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); + for entry in entry_map.values() { + count_validator.add_binding(entry); + } + // If a single bind group layout violates limits, the pipeline layout is + // definitely going to violate limits too, lets catch it now. + count_validator + .validate(&self.limits) + .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; + + Ok(binding_model::BindGroupLayout { + raw: Some(Arc::new(raw)), + device: self.clone(), + count_validator, + entries: entry_map, + info: ResourceInfo::new(label.unwrap_or("")), + #[cfg(debug_assertions)] + label: label.unwrap_or("").to_string(), + }) + } + + pub(crate) fn create_buffer_binding<'a>( + bb: &binding_model::BufferBinding, + binding: u32, + decl: &wgt::BindGroupLayoutEntry, + used_buffer_ranges: &mut Vec, + dynamic_binding_info: &mut Vec, + late_buffer_binding_sizes: &mut FastHashMap, + used: &mut BindGroupStates, + storage: &'a Storage, id::BufferId>, + limits: &wgt::Limits, + ) -> Result, binding_model::CreateBindGroupError> { + use crate::binding_model::CreateBindGroupError as Error; + + let (binding_ty, dynamic, min_size) = match decl.ty { + wgt::BindingType::Buffer { + ty, + has_dynamic_offset, + min_binding_size, + } => (ty, has_dynamic_offset, min_binding_size), + _ => { + return Err(Error::WrongBindingType { + binding, + actual: decl.ty, + expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer", + }) + } + }; + let (pub_usage, internal_use, range_limit) = match binding_ty { + wgt::BufferBindingType::Uniform => ( + wgt::BufferUsages::UNIFORM, + hal::BufferUses::UNIFORM, + limits.max_uniform_buffer_binding_size, + ), + wgt::BufferBindingType::Storage { read_only } => ( + wgt::BufferUsages::STORAGE, + if read_only { + hal::BufferUses::STORAGE_READ + } else { + hal::BufferUses::STORAGE_READ_WRITE + }, + limits.max_storage_buffer_binding_size, + ), + }; + + let (align, align_limit_name) = + binding_model::buffer_binding_type_alignment(limits, binding_ty); + if bb.offset % align as u64 != 0 { + return Err(Error::UnalignedBufferOffset( + bb.offset, + align_limit_name, + align, + )); + } + + let buffer = used + .buffers + .add_single(storage, bb.buffer_id, internal_use) + .ok_or(Error::InvalidBuffer(bb.buffer_id))?; + check_buffer_usage(buffer.usage, pub_usage)?; + let raw_buffer = 
buffer + .raw + .as_ref() + .ok_or(Error::InvalidBuffer(bb.buffer_id))?; + + let (bind_size, bind_end) = match bb.size { + Some(size) => { + let end = bb.offset + size.get(); + if end > buffer.size { + return Err(Error::BindingRangeTooLarge { + buffer: bb.buffer_id, + range: bb.offset..end, + size: buffer.size, + }); + } + (size.get(), end) + } + None => (buffer.size - bb.offset, buffer.size), + }; + + if bind_size > range_limit as u64 { + return Err(Error::BufferRangeTooLarge { + binding, + given: bind_size as u32, + limit: range_limit, + }); + } + + // Record binding info for validating dynamic offsets + if dynamic { + dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData { + binding_idx: binding, + buffer_size: buffer.size, + binding_range: bb.offset..bind_end, + maximum_dynamic_offset: buffer.size - bind_end, + binding_type: binding_ty, + }); + } + + if let Some(non_zero) = min_size { + let min_size = non_zero.get(); + if min_size > bind_size { + return Err(Error::BindingSizeTooSmall { + buffer: bb.buffer_id, + actual: bind_size, + min: min_size, + }); + } + } else { + let late_size = + wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?; + late_buffer_binding_sizes.insert(binding, late_size); + } + + assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); + used_buffer_ranges.extend(buffer.initialization_status.read().create_action( + bb.buffer_id, + bb.offset..bb.offset + bind_size, + MemoryInitKind::NeedsInitializedMemory, + )); + + Ok(hal::BufferBinding { + buffer: raw_buffer.as_ref(), + offset: bb.offset, + size: bb.size, + }) + } + + pub(crate) fn create_texture_binding( + view: &TextureView, + texture_guard: &Storage, id::TextureId>, + internal_use: hal::TextureUses, + pub_usage: wgt::TextureUsages, + used: &mut BindGroupStates, + used_texture_ranges: &mut Vec, + ) -> Result<(), binding_model::CreateBindGroupError> { + // Careful here: the texture may no longer have its own ref count, + // if it was deleted by the user. + let texture = used + .textures + .add_single( + texture_guard, + view.parent_id.0, + Some(view.selector.clone()), + internal_use, + ) + .ok_or(binding_model::CreateBindGroupError::InvalidTexture( + view.parent_id.0, + ))?; + check_texture_usage(texture.desc.usage, pub_usage)?; + + used_texture_ranges.push(TextureInitTrackerAction { + id: view.parent_id.0, + range: TextureInitRange { + mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), + layer_range: view + .desc + .range + .layer_range(texture.desc.array_layer_count()), + }, + kind: MemoryInitKind::NeedsInitializedMemory, + }); + + Ok(()) + } + + pub(crate) fn create_bind_group( + self: &Arc, + layout: &binding_model::BindGroupLayout, + desc: &binding_model::BindGroupDescriptor, + hub: &Hub, + ) -> Result, binding_model::CreateBindGroupError> { + use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; + { + // Check that the number of entries in the descriptor matches + // the number of entries in the layout. + let actual = desc.entries.len(); + let expected = layout.entries.len(); + if actual != expected { + return Err(Error::BindingsNumMismatch { expected, actual }); + } + } + + // TODO: arrayvec/smallvec, or re-use allocations + // Record binding info for dynamic offset validation + let mut dynamic_binding_info = Vec::new(); + // Map of binding -> shader reflected size + //Note: we can't collect into a vector right away because + // it needs to be in BGL iteration order, not BG entry order. 
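+            // A binding only lands in this map when its layout entry leaves
+            // `min_binding_size` as `None`; the size actually bound is recorded
+            // here so it can be checked later against the shader's requirement
+            // (see `make_late_sized_buffer_groups` above).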
+ let mut late_buffer_binding_sizes = FastHashMap::default(); + // fill out the descriptors + let mut used = BindGroupStates::new(); + + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + let texture_view_guard = hub.texture_views.read(); + let sampler_guard = hub.samplers.read(); + + let mut used_buffer_ranges = Vec::new(); + let mut used_texture_ranges = Vec::new(); + let mut hal_entries = Vec::with_capacity(desc.entries.len()); + let mut hal_buffers = Vec::new(); + let mut hal_samplers = Vec::new(); + let mut hal_textures = Vec::new(); + for entry in desc.entries.iter() { + let binding = entry.binding; + // Find the corresponding declaration in the layout + let decl = layout + .entries + .get(&binding) + .ok_or(Error::MissingBindingDeclaration(binding))?; + let (res_index, count) = match entry.resource { + Br::Buffer(ref bb) => { + let bb = Self::create_buffer_binding( + bb, + binding, + decl, + &mut used_buffer_ranges, + &mut dynamic_binding_info, + &mut late_buffer_binding_sizes, + &mut used, + &*buffer_guard, + &self.limits, + )?; + + let res_index = hal_buffers.len(); + hal_buffers.push(bb); + (res_index, 1) + } + Br::BufferArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_buffers.len(); + for bb in bindings_array.iter() { + let bb = Self::create_buffer_binding( + bb, + binding, + decl, + &mut used_buffer_ranges, + &mut dynamic_binding_info, + &mut late_buffer_binding_sizes, + &mut used, + &*buffer_guard, + &self.limits, + )?; + hal_buffers.push(bb); + } + (res_index, num_bindings) + } + Br::Sampler(id) => { + match decl.ty { + wgt::BindingType::Sampler(ty) => { + let sampler = used + .samplers + .add_single(&*sampler_guard, id) + .ok_or(Error::InvalidSampler(id))?; + + // Allowed sampler values for filtering and comparison + let (allowed_filtering, allowed_comparison) = match ty { + wgt::SamplerBindingType::Filtering => (None, false), + wgt::SamplerBindingType::NonFiltering => (Some(false), false), + wgt::SamplerBindingType::Comparison => (None, true), + }; + + if let Some(allowed_filtering) = allowed_filtering { + if allowed_filtering != sampler.filtering { + return Err(Error::WrongSamplerFiltering { + binding, + layout_flt: allowed_filtering, + sampler_flt: sampler.filtering, + }); + } + } + + if allowed_comparison != sampler.comparison { + return Err(Error::WrongSamplerComparison { + binding, + layout_cmp: allowed_comparison, + sampler_cmp: sampler.comparison, + }); + } + + let res_index = hal_samplers.len(); + hal_samplers.push(&sampler.raw); + (res_index, 1) + } + _ => { + return Err(Error::WrongBindingType { + binding, + actual: decl.ty, + expected: "Sampler", + }) + } + } + } + Br::SamplerArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_samplers.len(); + for &id in bindings_array.iter() { + let sampler = used + .samplers + .add_single(&*sampler_guard, id) + .ok_or(Error::InvalidSampler(id))?; + hal_samplers.push(&sampler.raw); + } + + (res_index, num_bindings) + } + Br::TextureView(id) => { + let view = used + .views + .add_single(&*texture_view_guard, id) + .ok_or(Error::InvalidTextureView(id))?; + let (pub_usage, internal_use) = Self::texture_use_parameters( + binding, + decl, + view, + "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture", + )?; + Self::create_texture_binding( + view, + 
&texture_guard, + internal_use, + pub_usage, + &mut used, + &mut used_texture_ranges, + )?; + let res_index = hal_textures.len(); + hal_textures.push(hal::TextureBinding { + view: view.raw.as_ref().unwrap().as_ref(), + usage: internal_use, + }); + (res_index, 1) + } + Br::TextureViewArray(ref bindings_array) => { + let num_bindings = bindings_array.len(); + Self::check_array_binding(self.features, decl.count, num_bindings)?; + + let res_index = hal_textures.len(); + for &id in bindings_array.iter() { + let view = used + .views + .add_single(&*texture_view_guard, id) + .ok_or(Error::InvalidTextureView(id))?; + let (pub_usage, internal_use) = + Self::texture_use_parameters(binding, decl, view, + "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; + Self::create_texture_binding( + view, + &texture_guard, + internal_use, + pub_usage, + &mut used, + &mut used_texture_ranges, + )?; + hal_textures.push(hal::TextureBinding { + view: view.raw.as_ref().unwrap().as_ref(), + usage: internal_use, + }); + } + + (res_index, num_bindings) + } + }; + + hal_entries.push(hal::BindGroupEntry { + binding, + resource_index: res_index as u32, + count: count as u32, + }); + } + + used.optimize(); + + hal_entries.sort_by_key(|entry| entry.binding); + for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) { + if a.binding == b.binding { + return Err(Error::DuplicateBinding(a.binding)); + } + } + let samplers = hal_samplers + .iter() + .map(|&s| s.as_ref().unwrap().as_ref()) + .collect::>(); + let hal_desc = hal::BindGroupDescriptor { + label: desc.label.borrow_option(), + layout: layout.raw.as_ref().unwrap().as_ref(), + entries: &hal_entries, + buffers: &hal_buffers, + samplers: samplers.as_ref(), + textures: &hal_textures, + }; + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_bind_group(&hal_desc) + .map_err(DeviceError::from)? 
+ }; + + Ok(binding_model::BindGroup { + raw: Some(Arc::new(raw)), + device: self.clone(), + layout_id: id::Valid(desc.layout), + info: ResourceInfo::new(desc.label.borrow_or_default()), + used, + used_buffer_ranges, + used_texture_ranges, + dynamic_binding_info, + // collect in the order of BGL iteration + late_buffer_binding_sizes: layout + .entries + .keys() + .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned()) + .collect(), + }) + } + + pub(crate) fn check_array_binding( + features: wgt::Features, + count: Option, + num_bindings: usize, + ) -> Result<(), super::binding_model::CreateBindGroupError> { + use super::binding_model::CreateBindGroupError as Error; + + if let Some(count) = count { + let count = count.get() as usize; + if count < num_bindings { + return Err(Error::BindingArrayPartialLengthMismatch { + actual: num_bindings, + expected: count, + }); + } + if count != num_bindings + && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY) + { + return Err(Error::BindingArrayLengthMismatch { + actual: num_bindings, + expected: count, + }); + } + if num_bindings == 0 { + return Err(Error::BindingArrayZeroLength); + } + } else { + return Err(Error::SingleBindingExpected); + }; + + Ok(()) + } + + pub(crate) fn texture_use_parameters( + binding: u32, + decl: &wgt::BindGroupLayoutEntry, + view: &TextureView, + expected: &'static str, + ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> { + use crate::binding_model::CreateBindGroupError as Error; + if view + .desc + .aspects() + .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL) + { + return Err(Error::DepthStencilAspect); + } + match decl.ty { + wgt::BindingType::Texture { + sample_type, + view_dimension, + multisampled, + } => { + use wgt::TextureSampleType as Tst; + if multisampled != (view.samples != 1) { + return Err(Error::InvalidTextureMultisample { + binding, + layout_multisampled: multisampled, + view_samples: view.samples, + }); + } + let compat_sample_type = view + .desc + .format + .sample_type(Some(view.desc.range.aspect)) + .unwrap(); + match (sample_type, compat_sample_type) { + (Tst::Uint, Tst::Uint) | + (Tst::Sint, Tst::Sint) | + (Tst::Depth, Tst::Depth) | + // if we expect non-filterable, accept anything float + (Tst::Float { filterable: false }, Tst::Float { .. }) | + // if we expect filterable, require it + (Tst::Float { filterable: true }, Tst::Float { filterable: true }) | + // if we expect non-filterable, also accept depth + (Tst::Float { filterable: false }, Tst::Depth) => {} + // if we expect filterable, also accept Float that is defined as + // unfilterable if filterable feature is explicitly enabled (only hit + // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is + // enabled) + (Tst::Float { filterable: true }, Tst::Float { .. 
}) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {} + _ => { + return Err(Error::InvalidTextureSampleType { + binding, + layout_sample_type: sample_type, + view_format: view.desc.format, + }) + } + } + if view_dimension != view.desc.dimension { + return Err(Error::InvalidTextureDimension { + binding, + layout_dimension: view_dimension, + view_dimension: view.desc.dimension, + }); + } + Ok(( + wgt::TextureUsages::TEXTURE_BINDING, + hal::TextureUses::RESOURCE, + )) + } + wgt::BindingType::StorageTexture { + access, + format, + view_dimension, + } => { + if format != view.desc.format { + return Err(Error::InvalidStorageTextureFormat { + binding, + layout_format: format, + view_format: view.desc.format, + }); + } + if view_dimension != view.desc.dimension { + return Err(Error::InvalidTextureDimension { + binding, + layout_dimension: view_dimension, + view_dimension: view.desc.dimension, + }); + } + + let mip_level_count = view.selector.mips.end - view.selector.mips.start; + if mip_level_count != 1 { + return Err(Error::InvalidStorageTextureMipLevelCount { + binding, + mip_level_count, + }); + } + + let internal_use = match access { + wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE, + wgt::StorageTextureAccess::ReadOnly => { + if !view + .format_features + .flags + .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE) + { + return Err(Error::StorageReadNotSupported(view.desc.format)); + } + hal::TextureUses::STORAGE_READ + } + wgt::StorageTextureAccess::ReadWrite => { + if !view + .format_features + .flags + .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE) + { + return Err(Error::StorageReadNotSupported(view.desc.format)); + } + + hal::TextureUses::STORAGE_READ_WRITE + } + }; + Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use)) + } + _ => Err(Error::WrongBindingType { + binding, + actual: decl.ty, + expected, + }), + } + } + + pub(crate) fn create_pipeline_layout( + self: &Arc, + desc: &binding_model::PipelineLayoutDescriptor, + bgl_guard: &Storage, id::BindGroupLayoutId>, + ) -> Result, binding_model::CreatePipelineLayoutError> { + use crate::binding_model::CreatePipelineLayoutError as Error; + + let bind_group_layouts_count = desc.bind_group_layouts.len(); + let device_max_bind_groups = self.limits.max_bind_groups as usize; + if bind_group_layouts_count > device_max_bind_groups { + return Err(Error::TooManyGroups { + actual: bind_group_layouts_count, + max: device_max_bind_groups, + }); + } + + if !desc.push_constant_ranges.is_empty() { + self.require_features(wgt::Features::PUSH_CONSTANTS)?; + } + + let mut used_stages = wgt::ShaderStages::empty(); + for (index, pc) in desc.push_constant_ranges.iter().enumerate() { + if pc.stages.intersects(used_stages) { + return Err(Error::MoreThanOnePushConstantRangePerStage { + index, + provided: pc.stages, + intersected: pc.stages & used_stages, + }); + } + used_stages |= pc.stages; + + let device_max_pc_size = self.limits.max_push_constant_size; + if device_max_pc_size < pc.range.end { + return Err(Error::PushConstantRangeTooLarge { + index, + range: pc.range.clone(), + max: device_max_pc_size, + }); + } + + if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 { + return Err(Error::MisalignedPushConstantRange { + index, + bound: pc.range.start, + }); + } + if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 { + return Err(Error::MisalignedPushConstantRange { + index, + bound: pc.range.end, + }); + } + } + + let mut count_validator = 
binding_model::BindingTypeMaxCountValidator::default(); + + // validate total resource counts + for &id in desc.bind_group_layouts.iter() { + let bind_group_layout = bgl_guard + .get(id) + .map_err(|_| Error::InvalidBindGroupLayout(id))?; + count_validator.merge(&bind_group_layout.count_validator); + } + count_validator + .validate(&self.limits) + .map_err(Error::TooManyBindings)?; + + let bgl_vec = desc + .bind_group_layouts + .iter() + .map(|&id| bgl_guard.get(id).unwrap().raw.as_ref().unwrap().as_ref()) + .collect::>(); + let hal_desc = hal::PipelineLayoutDescriptor { + label: desc.label.borrow_option(), + flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE, + bind_group_layouts: &bgl_vec, + push_constant_ranges: desc.push_constant_ranges.as_ref(), + }; + + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_pipeline_layout(&hal_desc) + .map_err(DeviceError::from)? + }; + + Ok(binding_model::PipelineLayout { + raw: Some(Arc::new(raw)), + device: self.clone(), + info: ResourceInfo::new(desc.label.borrow_or_default()), + bind_group_layout_ids: desc + .bind_group_layouts + .iter() + .map(|&id| id::Valid(id)) + .collect(), + push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(), + }) + } + + //TODO: refactor this. It's the only method of `Device` that registers new objects + // (the pipeline layout). + pub(crate) fn derive_pipeline_layout( + self: &Arc, + implicit_context: Option, + mut derived_group_layouts: ArrayVec, + bgl_guard: &mut Storage, id::BindGroupLayoutId>, + pipeline_layout_guard: &mut Storage, id::PipelineLayoutId>, + ) -> Result { + while derived_group_layouts + .last() + .map_or(false, |map| map.is_empty()) + { + derived_group_layouts.pop(); + } + let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?; + let group_count = derived_group_layouts.len(); + if ids.group_ids.len() < group_count { + log::error!( + "Not enough bind group IDs ({}) specified for the implicit layout ({})", + ids.group_ids.len(), + derived_group_layouts.len() + ); + return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _)); + } + + for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { + match Device::deduplicate_bind_group_layout(self.info.id().0, &map, bgl_guard) { + Some(dedup_id) => { + *bgl_id = dedup_id; + } + None => { + let bgl = self.create_bind_group_layout(None, map)?; + bgl_guard.force_replace(*bgl_id, bgl); + } + }; + } + + let layout_desc = binding_model::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]), + push_constant_ranges: Cow::Borrowed(&[]), //TODO? + }; + let layout = self.create_pipeline_layout(&layout_desc, bgl_guard)?; + pipeline_layout_guard.force_replace(ids.root_id, layout); + Ok(ids.root_id) + } + + pub(crate) fn create_compute_pipeline( + self: &Arc, + desc: &pipeline::ComputePipelineDescriptor, + implicit_context: Option, + hub: &Hub, + ) -> Result, pipeline::CreateComputePipelineError> { + //TODO: only lock mutable if the layout is derived + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); + + // This has to be done first, or otherwise the IDs may be pointing to entries + // that are not even in the storage. 
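+        // Pre-registering `IMPLICIT_FAILURE` placeholders means that, if pipeline
+        // creation bails out with an error below, lookups of the implicitly
+        // allocated layout IDs still resolve to error entries instead of hitting
+        // empty storage slots.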
+ if let Some(ref ids) = implicit_context { + pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); + for &bgl_id in ids.group_ids.iter() { + bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); + } + } + + self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?; + + let mut derived_group_layouts = + ArrayVec::::new(); + let mut shader_binding_sizes = FastHashMap::default(); + + let io = validation::StageIo::default(); + + let shader_module = hub + .shader_modules + .get(desc.stage.module) + .map_err(|_| validation::StageError::InvalidModule)?; + + { + let flag = wgt::ShaderStages::COMPUTE; + let provided_layouts = match desc.layout { + Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( + pipeline_layout_guard + .get(pipeline_layout_id) + .as_ref() + .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?, + &*bgl_guard, + )), + None => { + for _ in 0..self.limits.max_bind_groups { + derived_group_layouts.push(binding_model::BindEntryMap::default()); + } + None + } + }; + if let Some(ref interface) = shader_module.interface { + let _ = interface.check_stage( + provided_layouts.as_ref().map(|p| p.as_slice()), + &mut derived_group_layouts, + &mut shader_binding_sizes, + &desc.stage.entry_point, + flag, + io, + None, + )?; + } + } + + let pipeline_layout_id = match desc.layout { + Some(id) => id, + None => self.derive_pipeline_layout( + implicit_context, + derived_group_layouts, + &mut *bgl_guard, + &mut *pipeline_layout_guard, + )?, + }; + let layout = pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; + + let late_sized_buffer_groups = + Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout, &*bgl_guard); + + let pipeline_desc = hal::ComputePipelineDescriptor { + label: desc.label.borrow_option(), + layout: layout.raw.as_ref().unwrap().as_ref(), + stage: hal::ProgrammableStage { + entry_point: desc.stage.entry_point.as_ref(), + module: shader_module.raw.as_ref().unwrap().as_ref(), + }, + }; + + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_compute_pipeline(&pipeline_desc) + } + .map_err(|err| match err { + hal::PipelineError::Device(error) => { + pipeline::CreateComputePipelineError::Device(error.into()) + } + hal::PipelineError::Linkage(_stages, msg) => { + pipeline::CreateComputePipelineError::Internal(msg) + } + hal::PipelineError::EntryPoint(_stage) => { + pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string()) + } + })?; + + let pipeline = pipeline::ComputePipeline { + raw: Some(Arc::new(raw)), + layout_id: id::Valid(pipeline_layout_id), + device: self.clone(), + late_sized_buffer_groups, + info: ResourceInfo::new(desc.label.borrow_or_default()), + }; + Ok(pipeline) + } + + pub(crate) fn create_render_pipeline( + self: &Arc, + adapter: &Adapter, + desc: &pipeline::RenderPipelineDescriptor, + implicit_context: Option, + hub: &Hub, + ) -> Result, pipeline::CreateRenderPipelineError> { + use wgt::TextureFormatFeatureFlags as Tfff; + + //TODO: only lock mutable if the layout is derived + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); + + // This has to be done first, or otherwise the IDs may be pointing to entries + // that are not even in the storage. 
+ if let Some(ref ids) = implicit_context { + pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); + for &bgl_id in ids.group_ids.iter() { + bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); + } + } + + let mut derived_group_layouts = + ArrayVec::::new(); + let mut shader_binding_sizes = FastHashMap::default(); + + let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0); + if num_attachments > hal::MAX_COLOR_ATTACHMENTS { + return Err(pipeline::CreateRenderPipelineError::ColorAttachment( + command::ColorAttachmentError::TooMany { + given: num_attachments, + limit: hal::MAX_COLOR_ATTACHMENTS, + }, + )); + } + + let color_targets = desc + .fragment + .as_ref() + .map_or(&[][..], |fragment| &fragment.targets); + let depth_stencil_state = desc.depth_stencil.as_ref(); + + let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> = + color_targets.iter().filter_map(|x| x.as_ref()).collect(); + if !cts.is_empty() && { + let first = &cts[0]; + cts[1..] + .iter() + .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) + } { + log::info!("Color targets: {:?}", color_targets); + self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?; + } + + let mut io = validation::StageIo::default(); + let mut validated_stages = wgt::ShaderStages::empty(); + + let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len()); + let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len()); + let mut total_attributes = 0; + for (i, vb_state) in desc.vertex.buffers.iter().enumerate() { + vertex_steps.push(pipeline::VertexStep { + stride: vb_state.array_stride, + mode: vb_state.step_mode, + }); + if vb_state.attributes.is_empty() { + continue; + } + if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 { + return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge { + index: i as u32, + given: vb_state.array_stride as u32, + limit: self.limits.max_vertex_buffer_array_stride, + }); + } + if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 { + return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride { + index: i as u32, + stride: vb_state.array_stride, + }); + } + vertex_buffers.push(hal::VertexBufferLayout { + array_stride: vb_state.array_stride, + step_mode: vb_state.step_mode, + attributes: vb_state.attributes.as_ref(), + }); + + for attribute in vb_state.attributes.iter() { + if attribute.offset >= 0x10000000 { + return Err( + pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset { + location: attribute.shader_location, + offset: attribute.offset, + }, + ); + } + + if let wgt::VertexFormat::Float64 + | wgt::VertexFormat::Float64x2 + | wgt::VertexFormat::Float64x3 + | wgt::VertexFormat::Float64x4 = attribute.format + { + self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?; + } + + let previous = io.insert( + attribute.shader_location, + validation::InterfaceVar::vertex_attribute(attribute.format), + ); + + if previous.is_some() { + return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash( + attribute.shader_location, + )); + } + } + total_attributes += vb_state.attributes.len(); + } + + if vertex_buffers.len() > self.limits.max_vertex_buffers as usize { + return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers { + given: vertex_buffers.len() as u32, + limit: self.limits.max_vertex_buffers, + }); + } + if total_attributes > self.limits.max_vertex_attributes as usize { + return Err( + 
pipeline::CreateRenderPipelineError::TooManyVertexAttributes { + given: total_attributes as u32, + limit: self.limits.max_vertex_attributes, + }, + ); + } + + if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() { + return Err( + pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology { + strip_index_format: desc.primitive.strip_index_format, + topology: desc.primitive.topology, + }, + ); + } + + if desc.primitive.unclipped_depth { + self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?; + } + + if desc.primitive.polygon_mode == wgt::PolygonMode::Line { + self.require_features(wgt::Features::POLYGON_MODE_LINE)?; + } + if desc.primitive.polygon_mode == wgt::PolygonMode::Point { + self.require_features(wgt::Features::POLYGON_MODE_POINT)?; + } + + if desc.primitive.conservative { + self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?; + } + + if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill { + return Err( + pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode, + ); + } + + for (i, cs) in color_targets.iter().enumerate() { + if let Some(cs) = cs.as_ref() { + let error = loop { + if cs.write_mask.contains_invalid_bits() { + break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask)); + } + + let format_features = self.describe_format_features(adapter, cs.format)?; + if !format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format)); + } + let blendable = format_features.flags.contains(Tfff::BLENDABLE); + let filterable = format_features.flags.contains(Tfff::FILTERABLE); + let adapter_specific = self + .features + .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); + // according to WebGPU specifications the texture needs to be + // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is set - use + // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude + // this limitation + if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) { + break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format)); + } + if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) { + break Some(pipeline::ColorStateError::FormatNotColor(cs.format)); + } + if desc.multisample.count > 1 + && !format_features + .flags + .sample_count_supported(desc.multisample.count) + { + break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format)); + } + + break None; + }; + if let Some(e) = error { + return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e)); + } + } + } + + if let Some(ds) = depth_stencil_state { + let error = loop { + let format_features = self.describe_format_features(adapter, ds.format)?; + if !format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + { + break Some(pipeline::DepthStencilStateError::FormatNotRenderable( + ds.format, + )); + } + + let aspect = hal::FormatAspects::from(ds.format); + if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) { + break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format)); + } + if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) { + break Some(pipeline::DepthStencilStateError::FormatNotStencil( + ds.format, + )); + } + if desc.multisample.count > 1 + && !format_features + .flags + .sample_count_supported(desc.multisample.count) + { + break 
Some(pipeline::DepthStencilStateError::FormatNotMultisampled( + ds.format, + )); + } + + break None; + }; + if let Some(e) = error { + return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e)); + } + + if ds.bias.clamp != 0.0 { + self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?; + } + } + + if desc.layout.is_none() { + for _ in 0..self.limits.max_bind_groups { + derived_group_layouts.push(binding_model::BindEntryMap::default()); + } + } + + let samples = { + let sc = desc.multisample.count; + if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) { + return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc)); + } + sc + }; + + let shader_module_guard = hub.shader_modules.read(); + + let vertex_stage = { + let stage = &desc.vertex.stage; + let flag = wgt::ShaderStages::VERTEX; + + let shader_module = shader_module_guard.get(stage.module).map_err(|_| { + pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error: validation::StageError::InvalidModule, + } + })?; + + let provided_layouts = match desc.layout { + Some(pipeline_layout_id) => { + let pipeline_layout = pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + Some(Device::get_introspection_bind_group_layouts( + &pipeline_layout, + &*bgl_guard, + )) + } + None => None, + }; + + if let Some(ref interface) = shader_module.interface { + io = interface + .check_stage( + provided_layouts.as_ref().map(|p| p.as_slice()), + &mut derived_group_layouts, + &mut shader_binding_sizes, + &stage.entry_point, + flag, + io, + desc.depth_stencil.as_ref().map(|d| d.depth_compare), + ) + .map_err(|error| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error, + })?; + validated_stages |= flag; + } + + hal::ProgrammableStage { + module: shader_module.raw.as_ref().unwrap().as_ref(), + entry_point: stage.entry_point.as_ref(), + } + }; + + let fragment_stage = match desc.fragment { + Some(ref fragment) => { + let flag = wgt::ShaderStages::FRAGMENT; + + let shader_module = + shader_module_guard + .get(fragment.stage.module) + .map_err(|_| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error: validation::StageError::InvalidModule, + })?; + + let provided_layouts = match desc.layout { + Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( + pipeline_layout_guard + .get(pipeline_layout_id) + .as_ref() + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?, + &*bgl_guard, + )), + None => None, + }; + + if validated_stages == wgt::ShaderStages::VERTEX { + if let Some(ref interface) = shader_module.interface { + io = interface + .check_stage( + provided_layouts.as_ref().map(|p| p.as_slice()), + &mut derived_group_layouts, + &mut shader_binding_sizes, + &fragment.stage.entry_point, + flag, + io, + desc.depth_stencil.as_ref().map(|d| d.depth_compare), + ) + .map_err(|error| pipeline::CreateRenderPipelineError::Stage { + stage: flag, + error, + })?; + validated_stages |= flag; + } + } + + Some(hal::ProgrammableStage { + module: shader_module.raw.as_ref().unwrap().as_ref(), + entry_point: fragment.stage.entry_point.as_ref(), + }) + } + None => None, + }; + + if validated_stages.contains(wgt::ShaderStages::FRAGMENT) { + for (i, output) in io.iter() { + match color_targets.get(*i as usize) { + Some(&Some(ref state)) => { + validation::check_texture_format(state.format, &output.ty).map_err( + |pipeline| { + pipeline::CreateRenderPipelineError::ColorState( + *i as u8, + 
pipeline::ColorStateError::IncompatibleFormat { + pipeline, + shader: output.ty, + }, + ) + }, + )?; + } + _ => { + log::info!( + "The fragment stage {:?} output @location({}) values are ignored", + fragment_stage + .as_ref() + .map_or("", |stage| stage.entry_point), + i + ); + } + } + } + } + let last_stage = match desc.fragment { + Some(_) => wgt::ShaderStages::FRAGMENT, + None => wgt::ShaderStages::VERTEX, + }; + if desc.layout.is_none() && !validated_stages.contains(last_stage) { + return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into()); + } + + let pipeline_layout_id = match desc.layout { + Some(id) => id, + None => self.derive_pipeline_layout( + implicit_context, + derived_group_layouts, + &mut *bgl_guard, + &mut *pipeline_layout_guard, + )?, + }; + let layout = pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + + // Multiview is only supported if the feature is enabled + if desc.multiview.is_some() { + self.require_features(wgt::Features::MULTIVIEW)?; + } + + if !self + .downlevel + .flags + .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED) + { + for (binding, size) in shader_binding_sizes.iter() { + if size.get() % 16 != 0 { + return Err(pipeline::CreateRenderPipelineError::UnalignedShader { + binding: binding.binding, + group: binding.group, + size: size.get(), + }); + } + } + } + + let late_sized_buffer_groups = + Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout, &*bgl_guard); + + let pipeline_desc = hal::RenderPipelineDescriptor { + label: desc.label.borrow_option(), + layout: layout.raw.as_ref().unwrap().as_ref(), + vertex_buffers: &vertex_buffers, + vertex_stage, + primitive: desc.primitive, + depth_stencil: desc.depth_stencil.clone(), + multisample: desc.multisample, + fragment_stage, + color_targets, + multiview: desc.multiview, + }; + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_render_pipeline(&pipeline_desc) + } + .map_err(|err| match err { + hal::PipelineError::Device(error) => { + pipeline::CreateRenderPipelineError::Device(error.into()) + } + hal::PipelineError::Linkage(stage, msg) => { + pipeline::CreateRenderPipelineError::Internal { stage, error: msg } + } + hal::PipelineError::EntryPoint(stage) => { + pipeline::CreateRenderPipelineError::Internal { + stage: hal::auxil::map_naga_stage(stage), + error: EP_FAILURE.to_string(), + } + } + })?; + + let pass_context = RenderPassContext { + attachments: AttachmentData { + colors: color_targets + .iter() + .map(|state| state.as_ref().map(|s| s.format)) + .collect(), + resolves: ArrayVec::new(), + depth_stencil: depth_stencil_state.as_ref().map(|state| state.format), + }, + sample_count: samples, + multiview: desc.multiview, + }; + + let mut flags = pipeline::PipelineFlags::empty(); + for state in color_targets.iter().filter_map(|s| s.as_ref()) { + if let Some(ref bs) = state.blend { + if bs.color.uses_constant() | bs.alpha.uses_constant() { + flags |= pipeline::PipelineFlags::BLEND_CONSTANT; + } + } + } + if let Some(ds) = depth_stencil_state.as_ref() { + if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() { + flags |= pipeline::PipelineFlags::STENCIL_REFERENCE; + } + if !ds.is_depth_read_only() { + flags |= pipeline::PipelineFlags::WRITES_DEPTH; + } + if !ds.is_stencil_read_only(desc.primitive.cull_mode) { + flags |= pipeline::PipelineFlags::WRITES_STENCIL; + } + } + + let pipeline = pipeline::RenderPipeline { + raw: Some(Arc::new(raw)), + layout_id: 
id::Valid(pipeline_layout_id), + device: self.clone(), + pass_context, + flags, + strip_index_format: desc.primitive.strip_index_format, + vertex_steps, + late_sized_buffer_groups, + info: ResourceInfo::new(desc.label.borrow_or_default()), + }; + Ok(pipeline) + } + + pub(crate) fn describe_format_features( + &self, + adapter: &Adapter, + format: TextureFormat, + ) -> Result { + self.require_features(format.required_features())?; + + let using_device_features = self + .features + .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); + // If we're running downlevel, we need to manually ask the backend what + // we can use as we can't trust WebGPU. + let downlevel = !self.downlevel.is_webgpu_compliant(); + + if using_device_features || downlevel { + Ok(adapter.get_texture_format_features(format)) + } else { + Ok(format.guaranteed_format_features()) + } + } + + pub(crate) fn wait_for_submit( + &self, + submission_index: SubmissionIndex, + ) -> Result<(), WaitIdleError> { + let last_done_index = unsafe { + self.raw + .as_ref() + .unwrap() + .get_fence_value(self.fence.lock().as_ref().unwrap()) + .map_err(DeviceError::from)? + }; + if last_done_index < submission_index { + log::info!("Waiting for submission {:?}", submission_index); + unsafe { + self.raw + .as_ref() + .unwrap() + .wait(self.fence.lock().as_ref().unwrap(), submission_index, !0) + .map_err(DeviceError::from)? + }; + let closures = self.lock_life().triage_submissions( + submission_index, + self.command_allocator.lock().as_mut().unwrap(), + ); + assert!( + closures.is_empty(), + "wait_for_submit is not expected to work with closures" + ); + } + Ok(()) + } + + pub(crate) fn create_query_set( + self: &Arc, + desc: &resource::QuerySetDescriptor, + ) -> Result, resource::CreateQuerySetError> { + use resource::CreateQuerySetError as Error; + + match desc.ty { + wgt::QueryType::Occlusion => {} + wgt::QueryType::Timestamp => { + self.require_features(wgt::Features::TIMESTAMP_QUERY)?; + } + wgt::QueryType::PipelineStatistics(..) => { + self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?; + } + } + + if desc.count == 0 { + return Err(Error::ZeroCount); + } + + if desc.count > wgt::QUERY_SET_MAX_QUERIES { + return Err(Error::TooManyQueries { + count: desc.count, + maximum: wgt::QUERY_SET_MAX_QUERIES, + }); + } + + let hal_desc = desc.map_label(crate::LabelHelpers::borrow_option); + Ok(QuerySet { + raw: Some(Arc::new(unsafe { + self.raw + .as_ref() + .unwrap() + .create_query_set(&hal_desc) + .unwrap() + })), + device: self.clone(), + info: ResourceInfo::new(""), + desc: desc.map_label(|_| ()), + }) + } +} + +impl Device { + pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { + let mut baked = cmd_buf.into_baked(); + unsafe { + baked.encoder.reset_all(baked.list.into_iter()); + } + unsafe { + self.raw + .as_ref() + .unwrap() + .destroy_command_encoder(baked.encoder); + } + } + + /// Wait for idle and remove resources that we can, before we die. 
+ pub(crate) fn prepare_to_die(&self) { + self.pending_writes.lock().as_mut().unwrap().deactivate(); + let mut life_tracker = self.life_tracker.lock(); + let current_index = self.active_submission_index.load(Ordering::Relaxed); + if let Err(error) = unsafe { + self.raw.as_ref().unwrap().wait( + self.fence.lock().as_ref().unwrap(), + current_index, + CLEANUP_WAIT_MS, + ) + } { + log::error!("failed to wait for the device: {:?}", error); + } + let _ = life_tracker.triage_submissions( + current_index, + self.command_allocator.lock().as_mut().unwrap(), + ); + life_tracker.cleanup(); + #[cfg(feature = "trace")] + { + self.trace = None; + } + } +} + +impl Resource for Device { + const TYPE: &'static str = "Device"; + + fn info(&self) -> &ResourceInfo { + &self.info + } +} diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs new file mode 100644 index 0000000000..cb2a76681c --- /dev/null +++ b/wgpu-core/src/device/global.rs @@ -0,0 +1,2557 @@ +use crate::{ + binding_model, command, conv, + device::{life::WaitIdleError, map_buffer, queue, Device, DeviceError, HostMap}, + global::Global, + hal_api::HalApi, + id::{self, AdapterId, DeviceId, SurfaceId}, + identity::{GlobalIdentityHandlerFactory, Input}, + init_tracker::TextureInitTracker, + instance::{self, Adapter, Surface}, + pipeline, present, + resource::{self, Buffer, BufferAccessResult, BufferMapState, Resource}, + resource::{BufferAccessError, BufferMapOperation}, + validation::check_buffer_usage, + FastHashMap, Label, LabelHelpers as _, +}; + +use hal::{CommandEncoder as _, Device as _}; +use parking_lot::RwLock; +use smallvec::SmallVec; + +use wgt::{BufferAddress, TextureFormat}; + +use std::{ + borrow::Cow, + iter, mem, + ops::Range, + ptr, + sync::{atomic::Ordering, Arc}, +}; + +use super::{BufferMapPendingClosure, ImplicitPipelineIds, InvalidDevice, UserClosures}; + +impl Global { + pub fn adapter_is_surface_supported( + &self, + adapter_id: AdapterId, + surface_id: SurfaceId, + ) -> Result { + let hub = A::hub(self); + + let surface_guard = self.surfaces.read(); + let adapter_guard = hub.adapters.read(); + let adapter = adapter_guard + .get(adapter_id) + .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; + let surface = surface_guard + .get(surface_id) + .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; + Ok(adapter.is_surface_supported(&surface)) + } + + pub fn surface_get_capabilities( + &self, + surface_id: SurfaceId, + adapter_id: AdapterId, + ) -> Result { + profiling::scope!("Surface::get_capabilities"); + self.fetch_adapter_and_surface::(surface_id, adapter_id, |adapter, surface| { + let mut hal_caps = surface.get_capabilities(adapter)?; + + hal_caps.formats.sort_by_key(|f| !f.is_srgb()); + + Ok(wgt::SurfaceCapabilities { + formats: hal_caps.formats, + present_modes: hal_caps.present_modes, + alpha_modes: hal_caps.composite_alpha_modes, + }) + }) + } + + fn fetch_adapter_and_surface< + A: HalApi, + F: FnOnce(&Adapter, &Surface) -> Result, + B, + >( + &self, + surface_id: SurfaceId, + adapter_id: AdapterId, + get_supported_callback: F, + ) -> Result { + let hub = A::hub(self); + + let surface_guard = self.surfaces.read(); + let adapter_guard = hub.adapters.read(); + let adapter = adapter_guard + .get(adapter_id) + .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; + let surface = surface_guard + .get(surface_id) + .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; + + get_supported_callback(&adapter, &surface) + } + + pub fn device_features( + &self, 
+ device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.features) + } + + pub fn device_limits( + &self, + device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.limits.clone()) + } + + pub fn device_downlevel_properties( + &self, + device_id: DeviceId, + ) -> Result { + let hub = A::hub(self); + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + + Ok(device.downlevel.clone()) + } + + pub fn device_create_buffer( + &self, + device_id: DeviceId, + desc: &resource::BufferDescriptor, + id_in: Input, + ) -> (id::BufferId, Option) { + profiling::scope!("Device::create_buffer"); + + let hub = A::hub(self); + let fid = hub.buffers.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut desc = desc.clone(); + let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); + if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + desc.usage |= wgt::BufferUsages::COPY_DST; + } + trace + .lock() + .add(trace::Action::CreateBuffer(fid.id(), desc)); + } + + let mut buffer = match device.create_buffer(device_id, desc, false) { + Ok(buffer) => buffer, + Err(e) => break e, + }; + + let buffer_use = if !desc.mapped_at_creation { + hal::BufferUses::empty() + } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + // buffer is mappable, so we are just doing that at start + let map_size = buffer.size; + let ptr = if map_size == 0 { + std::ptr::NonNull::dangling() + } else { + match map_buffer( + device.raw.as_ref().unwrap(), + &mut buffer, + 0, + map_size, + HostMap::Write, + ) { + Ok(ptr) => ptr, + Err(e) => { + device.lock_life().schedule_resource_destruction( + queue::TempResource::Buffer(Arc::new(buffer)), + !0, + ); + break e.into(); + } + } + }; + *buffer.map_state.lock() = resource::BufferMapState::Active { + ptr, + range: 0..map_size, + host: HostMap::Write, + }; + hal::BufferUses::MAP_WRITE + } else { + // buffer needs staging area for initialization only + let stage_desc = wgt::BufferDescriptor { + label: Some(Cow::Borrowed( + "(wgpu internal) initializing unmappable buffer", + )), + size: desc.size, + usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }; + let stage = match device.create_buffer(device_id, &stage_desc, true) { + Ok(stage) => stage, + Err(e) => { + device.lock_life().schedule_resource_destruction( + queue::TempResource::Buffer(Arc::new(buffer)), + !0, + ); + break e; + } + }; + let mapping = match unsafe { + device + .raw + .as_ref() + .unwrap() + .map_buffer(stage.raw.as_ref().unwrap(), 0..stage.size) + } { + Ok(mapping) => mapping, + Err(e) => { + let mut life_lock = device.lock_life(); + life_lock.schedule_resource_destruction( + queue::TempResource::Buffer(Arc::new(buffer)), + !0, + ); + life_lock.schedule_resource_destruction( + queue::TempResource::Buffer(Arc::new(stage)), + !0, + ); + break DeviceError::from(e).into(); + } + }; + + assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0); + // Zero initialize memory and then mark both staging and buffer as initialized + // (it's guaranteed that this is the case by the time the buffer is usable) + unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, 
buffer.size as usize) }; + buffer.initialization_status.write().drain(0..buffer.size); + stage.initialization_status.write().drain(0..buffer.size); + + *buffer.map_state.lock() = resource::BufferMapState::Init { + ptr: mapping.ptr, + needs_flush: !mapping.is_coherent, + stage_buffer: Arc::new(stage), + }; + hal::BufferUses::COPY_DST + }; + + let (id, resource) = fid.assign(buffer); + log::info!("Created buffer {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .buffers + .insert_single(id, resource, buffer_use); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + /// Assign `id_in` an error with the given `label`. + /// + /// Ensure that future attempts to use `id_in` as a buffer ID will propagate + /// the error, following the WebGPU ["contagious invalidity"] style. + /// + /// Firefox uses this function to comply strictly with the WebGPU spec, + /// which requires [`GPUBufferDescriptor`] validation to be generated on the + /// Device timeline and leave the newly created [`GPUBuffer`] invalid. + /// + /// Ideally, we would simply let [`device_create_buffer`] take care of all + /// of this, but some errors must be detected before we can even construct a + /// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API + /// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL + /// `unsigned long` value, but we can't construct a + /// [`wgpu_types::BufferUsages`] value from values with unassigned bits + /// set. This means we must validate `usage` before we can call + /// `device_create_buffer`. + /// + /// When that validation fails, we must arrange for the buffer id to be + /// considered invalid. This method provides the means to do so. + /// + /// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity + /// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor + /// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer + /// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor + /// [`device_create_buffer`]: Global::device_create_buffer + /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage + /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages + pub fn create_buffer_error(&self, id_in: Input, label: Label) { + let hub = A::hub(self); + let fid = hub.buffers.prepare(id_in); + + fid.assign_error(label.borrow_or_default()); + } + + /// Assign `id_in` an error with the given `label`. + /// + /// See `create_buffer_error` for more context and explaination. + pub fn create_texture_error(&self, id_in: Input, label: Label) { + let hub = A::hub(self); + let fid = hub.textures.prepare(id_in); + + fid.assign_error(label.borrow_or_default()); + } + + #[cfg(feature = "replay")] + pub fn device_wait_for_buffer( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + ) -> Result<(), WaitIdleError> { + let hub = A::hub(self); + let device_guard = hub.devices.read(); + let last_submission = { + let buffer_guard = hub.buffers.write(); + match buffer_guard.get(buffer_id) { + Ok(buffer) => buffer.info.submission_index(), + Err(_) => return Ok(()), + } + }; + + device_guard + .get(device_id) + .map_err(|_| DeviceError::Invalid)? 
+ .wait_for_submit(last_submission) + } + + #[doc(hidden)] + pub fn device_set_buffer_sub_data( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + offset: BufferAddress, + data: &[u8], + ) -> BufferAccessResult { + profiling::scope!("Device::set_buffer_sub_data"); + + let hub = A::hub(self); + + let device = hub + .devices + .get(device_id) + .map_err(|_| DeviceError::Invalid)?; + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?; + //assert!(buffer isn't used by the GPU); + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data_path = trace.make_binary("bin", data); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data: data_path, + range: offset..offset + data.len() as BufferAddress, + queued: false, + }); + } + + let raw_buf = buffer.raw.as_ref().unwrap(); + unsafe { + let mapping = device + .raw + .as_ref() + .unwrap() + .map_buffer(raw_buf, offset..offset + data.len() as u64) + .map_err(DeviceError::from)?; + ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); + if !mapping.is_coherent { + device + .raw + .as_ref() + .unwrap() + .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64)); + } + device + .raw + .as_ref() + .unwrap() + .unmap_buffer(raw_buf) + .map_err(DeviceError::from)?; + } + + Ok(()) + } + + #[doc(hidden)] + pub fn device_get_buffer_sub_data( + &self, + device_id: DeviceId, + buffer_id: id::BufferId, + offset: BufferAddress, + data: &mut [u8], + ) -> BufferAccessResult { + profiling::scope!("Device::get_buffer_sub_data"); + + let hub = A::hub(self); + + let device = hub + .devices + .get(device_id) + .map_err(|_| DeviceError::Invalid)?; + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?; + //assert!(buffer isn't used by the GPU); + + let raw_buf = buffer.raw.as_ref().unwrap(); + unsafe { + let mapping = device + .raw + .as_ref() + .unwrap() + .map_buffer(raw_buf, offset..offset + data.len() as u64) + .map_err(DeviceError::from)?; + if !mapping.is_coherent { + device.raw.as_ref().unwrap().invalidate_mapped_ranges( + raw_buf, + iter::once(offset..offset + data.len() as u64), + ); + } + ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len()); + device + .raw + .as_ref() + .unwrap() + .unmap_buffer(raw_buf) + .map_err(DeviceError::from)?; + } + + Ok(()) + } + + pub fn buffer_label(&self, id: id::BufferId) -> String { + A::hub(self).buffers.label_for_resource(id) + } + + pub fn buffer_destroy( + &self, + buffer_id: id::BufferId, + ) -> Result<(), resource::DestroyError> { + profiling::scope!("Buffer::destroy"); + + let map_closure; + // Restrict the locks to this scope. + { + let hub = A::hub(self); + + //TODO: lock pending writes separately, keep the device read-only + + log::info!("Buffer {:?} is destroyed", buffer_id); + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &buffer.device; + + map_closure = match &*buffer.map_state.lock() { + &BufferMapState::Waiting(..) // To get the proper callback behavior. + | &BufferMapState::Init { .. } + | &BufferMapState::Active { .. 
} + => { + self.buffer_unmap_inner(buffer_id, &buffer, &device) + .unwrap_or(None) + } + _ => None, + }; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeBuffer(buffer_id)); + } + if buffer.raw.is_none() { + return Err(resource::DestroyError::AlreadyDestroyed); + } + + let temp = queue::TempResource::Buffer(buffer.clone()); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_buffers.contains_key(&buffer_id) { + pending_writes.temp_resources.push(temp); + } else { + let last_submit_index = buffer.info.submission_index(); + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); + } + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((operation, status)) = map_closure { + operation.callback.call(status); + } + + Ok(()) + } + + pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { + profiling::scope!("Buffer::drop"); + log::debug!("buffer {:?} is dropped", buffer_id); + + let hub = A::hub(self); + let mut buffer_guard = hub.buffers.write(); + + let (last_submit_index, buffer) = { + match buffer_guard.get(buffer_id) { + Ok(buffer) => { + let last_submit_index = buffer.info.submission_index(); + (last_submit_index, buffer) + } + Err(_) => { + hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); + return; + } + } + }; + + let device = &buffer.device; + { + let mut life_lock = device.lock_life(); + if device + .pending_writes + .lock() + .as_ref() + .unwrap() + .dst_buffers + .contains_key(&buffer_id) + { + life_lock.future_suspected_buffers.push(buffer.clone()); + } else { + life_lock.suspected_resources.buffers.push(buffer.clone()); + } + } + + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + } + } + } + + pub fn device_create_texture( + &self, + device_id: DeviceId, + desc: &resource::TextureDescriptor, + id_in: Input, + idtv_in: Input, + ) -> (id::TextureId, Option) { + profiling::scope!("Device::create_texture"); + + let hub = A::hub(self); + + let fid = hub.textures.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + } + + let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); + let texture = match device.create_texture(device_id, &adapter, desc) { + Ok(texture) => texture, + Err(error) => break error, + }; + let (id, resource) = fid.assign(texture); + log::info!("Created texture {:?} with {:?}", id, desc); + + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; + + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let fid = hub.texture_views.prepare(idtv_in.clone()); + + let descriptor = resource::TextureViewDescriptor { + label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), + format: Some(desc.format), + dimension: Some(dimension), + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: 
Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + + let texture_view = device + .create_texture_view(&resource, id.0, &descriptor) + .unwrap(); + let (tv_id, texture_view) = fid.assign(texture_view); + log::info!("Created texture view {:?} for texture {:?}", tv_id, id); + + let mut texture_clear_mode = resource.clear_mode.write(); + match &mut *texture_clear_mode { + resource::TextureClearMode::RenderPass { + clear_views, + is_color: _, + } => { + clear_views.push(texture_view.clone()); + } + _ => {} + } + + device + .trackers + .lock() + .views + .insert_single(tv_id, texture_view); + } + } + + device.trackers.lock().textures.insert_single( + id.0, + resource, + hal::TextureUses::UNINITIALIZED, + ); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + /// # Safety + /// + /// - `hal_texture` must be created from `device_id` corresponding raw handle. + /// - `hal_texture` must be created respecting `desc` + /// - `hal_texture` must be initialized + pub unsafe fn create_texture_from_hal( + &self, + hal_texture: A::Texture, + device_id: DeviceId, + desc: &resource::TextureDescriptor, + id_in: Input, + ) -> (id::TextureId, Option) { + profiling::scope!("Device::create_texture"); + + let hub = A::hub(self); + + let fid = hub.textures.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + + // NB: Any change done through the raw texture handle will not be + // recorded in the replay + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + } + + let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); + + let format_features = match device + .describe_format_features(&adapter, desc.format) + .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error)) + { + Ok(features) => features, + Err(error) => break error, + }; + + let mut texture = device.create_texture_from_hal( + hal_texture, + conv::map_texture_usage(desc.usage, desc.format.into()), + device_id, + desc, + format_features, + resource::TextureClearMode::None, + ); + if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + texture.hal_usage |= hal::TextureUses::COPY_DST; + } + + texture.initialization_status = + RwLock::new(TextureInitTracker::new(desc.mip_level_count, 0)); + + let (id, resource) = fid.assign(texture); + log::info!("Created texture {:?} with {:?}", id, desc); + + device.trackers.lock().textures.insert_single( + id.0, + resource, + hal::TextureUses::UNINITIALIZED, + ); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn texture_label(&self, id: id::TextureId) -> String { + A::hub(self).textures.label_for_resource(id) + } + + pub fn texture_destroy( + &self, + texture_id: id::TextureId, + ) -> Result<(), resource::DestroyError> { + profiling::scope!("Texture::destroy"); + + let hub = A::hub(self); + + log::info!("Buffer {:?} is destroyed", texture_id); + let texture = hub + .textures + .get(texture_id) + .map_err(|_| resource::DestroyError::Invalid)?; + + let device = &texture.device; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::FreeTexture(texture_id)); + } + + let last_submit_index = texture.info.submission_index(); + + let mut clear_views = match 
std::mem::replace( + &mut *texture.clear_mode.write(), + resource::TextureClearMode::None, + ) { + resource::TextureClearMode::BufferCopy => SmallVec::new(), + resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views, + resource::TextureClearMode::None => SmallVec::new(), + }; + + match texture.inner.as_ref().unwrap() { + resource::TextureInner::Native { ref raw } => { + if raw.is_none() { + return Err(resource::DestroyError::AlreadyDestroyed); + } + let temp = queue::TempResource::Texture(texture.clone(), clear_views); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_textures.contains_key(&texture_id) { + pending_writes.temp_resources.push(temp); + } else { + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); + } + } + resource::TextureInner::Surface { .. } => { + clear_views.clear(); + } + } + + Ok(()) + } + + pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { + profiling::scope!("Texture::drop"); + log::debug!("texture {:?} is dropped", texture_id); + + let hub = A::hub(self); + let mut texture_guard = hub.textures.write(); + + let (last_submit_index, texture) = { + match texture_guard.get(texture_id) { + Ok(texture) => { + let last_submit_index = texture.info.submission_index(); + (last_submit_index, texture) + } + Err(_) => { + hub.textures + .unregister_locked(texture_id, &mut *texture_guard); + return; + } + } + }; + + let device = &texture.device; + { + let mut life_lock = device.lock_life(); + if device + .pending_writes + .lock() + .as_ref() + .unwrap() + .dst_textures + .contains_key(&texture_id) + { + life_lock.future_suspected_textures.push(texture.clone()); + } else { + life_lock.suspected_resources.textures.push(texture.clone()); + } + } + + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + } + } + } + + pub fn texture_create_view( + &self, + texture_id: id::TextureId, + desc: &resource::TextureViewDescriptor, + id_in: Input, + ) -> (id::TextureViewId, Option) { + profiling::scope!("Texture::create_view"); + + let hub = A::hub(self); + + let fid = hub.texture_views.prepare(id_in); + + let error = loop { + let texture = match hub.textures.get(texture_id) { + Ok(texture) => texture, + Err(_) => break resource::CreateTextureViewError::InvalidTexture, + }; + let device = &texture.device; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateTextureView { + id: fid.id(), + parent_id: texture_id, + desc: desc.clone(), + }); + } + + let view = match device.create_texture_view(&texture, texture_id, desc) { + Ok(view) => view, + Err(e) => break e, + }; + let (id, resource) = fid.assign(view); + + device.trackers.lock().views.insert_single(id, resource); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn texture_view_label(&self, id: id::TextureViewId) -> String { + A::hub(self).texture_views.label_for_resource(id) + } + + pub fn texture_view_drop( + &self, + texture_view_id: id::TextureViewId, + wait: bool, + ) -> Result<(), resource::TextureViewDestroyError> { + profiling::scope!("TextureView::drop"); + log::debug!("texture view {:?} is dropped", texture_view_id); + + let hub = A::hub(self); + let mut texture_view_guard = hub.texture_views.write(); + + let (last_submit_index, view) = { + match 
texture_view_guard.get(texture_view_id) { + Ok(view) => { + let last_submit_index = view.info.submission_index(); + (last_submit_index, view) + } + Err(_) => { + hub.texture_views + .unregister_locked(texture_view_id, &mut *texture_view_guard); + return Ok(()); + } + } + }; + + view.device + .lock_life() + .suspected_resources + .texture_views + .push(view.clone()); + + if wait { + match view.device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!( + "Failed to wait for texture view {:?}: {:?}", + texture_view_id, + e + ), + } + } + Ok(()) + } + + pub fn device_create_sampler( + &self, + device_id: DeviceId, + desc: &resource::SamplerDescriptor, + id_in: Input, + ) -> (id::SamplerId, Option) { + profiling::scope!("Device::create_sampler"); + + let hub = A::hub(self); + let fid = hub.samplers.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateSampler(fid.id(), desc.clone())); + } + + let sampler = match device.create_sampler(desc) { + Ok(sampler) => sampler, + Err(e) => break e, + }; + + let (id, resource) = fid.assign(sampler); + + device.trackers.lock().samplers.insert_single(id, resource); + + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn sampler_label(&self, id: id::SamplerId) -> String { + A::hub(self).samplers.label_for_resource(id) + } + + pub fn sampler_drop(&self, sampler_id: id::SamplerId) { + profiling::scope!("Sampler::drop"); + log::debug!("sampler {:?} is dropped", sampler_id); + + let hub = A::hub(self); + let mut sampler_guard = hub.samplers.write(); + + let sampler = { + match sampler_guard.get(sampler_id) { + Ok(sampler) => sampler, + Err(_) => { + hub.samplers + .unregister_locked(sampler_id, &mut *sampler_guard); + return; + } + } + }; + + sampler + .device + .lock_life() + .suspected_resources + .samplers + .push(sampler.clone()); + } + + pub fn device_create_bind_group_layout( + &self, + device_id: DeviceId, + desc: &binding_model::BindGroupLayoutDescriptor, + id_in: Input, + ) -> ( + id::BindGroupLayoutId, + Option, + ) { + profiling::scope!("Device::create_bind_group_layout"); + + let hub = A::hub(self); + let fid = hub.bind_group_layouts.prepare(id_in); + + let error = 'outer: loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); + } + + let mut entry_map = FastHashMap::default(); + for entry in desc.entries.iter() { + if entry.binding > device.limits.max_bindings_per_bind_group { + break 'outer binding_model::CreateBindGroupLayoutError::InvalidBindingIndex { + binding: entry.binding, + maximum: device.limits.max_bindings_per_bind_group, + }; + } + if entry_map.insert(entry.binding, *entry).is_some() { + break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding( + entry.binding, + ); + } + } + + // If there is an equivalent BGL, just bump the refcount and return it. + // This is only applicable for identity filters that are generating new IDs, + // so their inputs are `PhantomData` of size 0. 
+ if mem::size_of::>() == 0 { + let bgl_guard = hub.bind_group_layouts.read(); + if let Some(id) = + Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) + { + return (id, None); + } + } + + let layout = + match device.create_bind_group_layout(desc.label.borrow_option(), entry_map) { + Ok(layout) => layout, + Err(e) => break e, + }; + + let (id, _) = fid.assign(layout); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn bind_group_layout_label(&self, id: id::BindGroupLayoutId) -> String { + A::hub(self).bind_group_layouts.label_for_resource(id) + } + + pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { + profiling::scope!("BindGroupLayout::drop"); + log::debug!("bind group layout {:?} is dropped", bind_group_layout_id); + + let hub = A::hub(self); + let mut bind_group_layout_guard = hub.bind_group_layouts.write(); + + let layout = { + match bind_group_layout_guard.get(bind_group_layout_id) { + Ok(layout) => layout, + Err(_) => { + hub.bind_group_layouts + .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); + return; + } + } + }; + + layout + .device + .lock_life() + .suspected_resources + .bind_group_layouts + .push(layout.clone()); + } + + pub fn device_create_pipeline_layout( + &self, + device_id: DeviceId, + desc: &binding_model::PipelineLayoutDescriptor, + id_in: Input, + ) -> ( + id::PipelineLayoutId, + Option, + ) { + profiling::scope!("Device::create_pipeline_layout"); + + let hub = A::hub(self); + let fid = hub.pipeline_layouts.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); + } + + let layout = { + let bgl_guard = hub.bind_group_layouts.read(); + match device.create_pipeline_layout(desc, &*bgl_guard) { + Ok(layout) => layout, + Err(e) => break e, + } + }; + + let (id, _) = fid.assign(layout); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn pipeline_layout_label(&self, id: id::PipelineLayoutId) -> String { + A::hub(self).pipeline_layouts.label_for_resource(id) + } + + pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { + profiling::scope!("PipelineLayout::drop"); + log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id); + + let hub = A::hub(self); + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let layout = { + match pipeline_layout_guard.get(pipeline_layout_id) { + Ok(layout) => layout, + Err(_) => { + hub.pipeline_layouts + .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); + return; + } + } + }; + + layout + .device + .lock_life() + .suspected_resources + .pipeline_layouts + .push(layout.clone()); + } + + pub fn device_create_bind_group( + &self, + device_id: DeviceId, + desc: &binding_model::BindGroupDescriptor, + id_in: Input, + ) -> (id::BindGroupId, Option) { + profiling::scope!("Device::create_bind_group"); + + let hub = A::hub(self); + let fid = hub.bind_groups.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + 
.lock() + .add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); + } + + let bind_group_layout = match hub.bind_group_layouts.get(desc.layout) { + Ok(layout) => layout, + Err(_) => break binding_model::CreateBindGroupError::InvalidLayout, + }; + let bind_group = match device.create_bind_group(&bind_group_layout, desc, hub) { + Ok(bind_group) => bind_group, + Err(e) => break e, + }; + let (id, resource) = fid.assign(bind_group); + log::debug!("Bind group {:?}", id,); + + device + .trackers + .lock() + .bind_groups + .insert_single(id, resource); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn bind_group_label(&self, id: id::BindGroupId) -> String { + A::hub(self).bind_groups.label_for_resource(id) + } + + pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { + profiling::scope!("BindGroup::drop"); + log::debug!("bind group {:?} is dropped", bind_group_id); + + let hub = A::hub(self); + let mut bind_group_guard = hub.bind_groups.write(); + + let bind_group = { + match bind_group_guard.get(bind_group_id) { + Ok(bind_group) => bind_group, + Err(_) => { + hub.bind_groups + .unregister_locked(bind_group_id, &mut *bind_group_guard); + return; + } + } + }; + + bind_group + .device + .lock_life() + .suspected_resources + .bind_groups + .push(bind_group.clone()); + } + + pub fn device_create_shader_module( + &self, + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, + source: pipeline::ShaderModuleSource, + id_in: Input, + ) -> ( + id::ShaderModuleId, + Option, + ) { + profiling::scope!("Device::create_shader_module"); + + let hub = A::hub(self); + let fid = hub.shader_modules.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data = match source { + #[cfg(feature = "wgsl")] + pipeline::ShaderModuleSource::Wgsl(ref code) => { + trace.make_binary("wgsl", code.as_bytes()) + } + pipeline::ShaderModuleSource::Naga(ref module) => { + let string = + ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default()) + .unwrap(); + trace.make_binary("ron", string.as_bytes()) + } + pipeline::ShaderModuleSource::Dummy(_) => { + panic!("found `ShaderModuleSource::Dummy`") + } + }; + trace.add(trace::Action::CreateShaderModule { + id: fid.id(), + desc: desc.clone(), + data, + }); + }; + + let shader = match device.create_shader_module(desc, source) { + Ok(shader) => shader, + Err(e) => break e, + }; + let (id, _) = fid.assign(shader); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + // Unsafe-ness of internal calls has little to do with unsafe-ness of this. + #[allow(unused_unsafe)] + /// # Safety + /// + /// This function passes SPIR-V binary to the backend as-is and can potentially result in a + /// driver crash. 
+ pub unsafe fn device_create_shader_module_spirv( + &self, + device_id: DeviceId, + desc: &pipeline::ShaderModuleDescriptor, + source: Cow<[u32]>, + id_in: Input, + ) -> ( + id::ShaderModuleId, + Option, + ) { + profiling::scope!("Device::create_shader_module"); + + let hub = A::hub(self); + let fid = hub.shader_modules.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data = trace.make_binary("spv", unsafe { + std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4) + }); + trace.add(trace::Action::CreateShaderModule { + id: fid.id(), + desc: desc.clone(), + data, + }); + }; + + let shader = match unsafe { device.create_shader_module_spirv(desc, &source) } { + Ok(shader) => shader, + Err(e) => break e, + }; + let (id, _) = fid.assign(shader); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn shader_module_label(&self, id: id::ShaderModuleId) -> String { + A::hub(self).shader_modules.label_for_resource(id) + } + + pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { + profiling::scope!("ShaderModule::drop"); + log::debug!("shader module {:?} is dropped", shader_module_id); + + let hub = A::hub(self); + hub.shader_modules.unregister(shader_module_id); + } + + pub fn device_create_command_encoder( + &self, + device_id: DeviceId, + desc: &wgt::CommandEncoderDescriptor(command_buffer_id) + } + + pub fn device_create_render_bundle_encoder( + &self, + device_id: DeviceId, + desc: &command::RenderBundleEncoderDescriptor, + ) -> ( + id::RenderBundleEncoderId, + Option, + ) { + profiling::scope!("Device::create_render_bundle_encoder"); + let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) { + Ok(encoder) => (encoder, None), + Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)), + }; + (Box::into_raw(Box::new(encoder)), error) + } + + pub fn render_bundle_encoder_finish( + &self, + bundle_encoder: command::RenderBundleEncoder, + desc: &command::RenderBundleDescriptor, + id_in: Input, + ) -> (id::RenderBundleId, Option) { + profiling::scope!("RenderBundleEncoder::finish"); + + let hub = A::hub(self); + + let fid = hub.render_bundles.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(bundle_encoder.parent()) { + Ok(device) => device, + Err(_) => break command::RenderBundleError::INVALID_DEVICE, + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateRenderBundle { + id: fid.id(), + desc: trace::new_render_bundle_encoder_descriptor( + desc.label.clone(), + &bundle_encoder.context, + bundle_encoder.is_depth_read_only, + bundle_encoder.is_stencil_read_only, + ), + base: bundle_encoder.to_base_pass(), + }); + } + + let render_bundle = match bundle_encoder.finish(desc, &device, hub) { + Ok(bundle) => bundle, + Err(e) => break e, + }; + + log::debug!("Render bundle"); + let (id, resource) = fid.assign(render_bundle); + + device.trackers.lock().bundles.insert_single(id, resource); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + pub fn render_bundle_label(&self, id: id::RenderBundleId) -> String { + A::hub(self).render_bundles.label_for_resource(id) + } + + pub fn 
render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { + profiling::scope!("RenderBundle::drop"); + log::debug!("render bundle {:?} is dropped", render_bundle_id); + let hub = A::hub(self); + let mut bundle_guard = hub.render_bundles.write(); + + let bundle = { + match bundle_guard.get(render_bundle_id) { + Ok(bundle) => bundle, + Err(_) => { + hub.render_bundles + .unregister_locked(render_bundle_id, &mut *bundle_guard); + return; + } + } + }; + + hub.devices + .get(bundle.device_id.0) + .unwrap() + .lock_life() + .suspected_resources + .render_bundles + .push(bundle.clone()); + } + + pub fn device_create_query_set( + &self, + device_id: DeviceId, + desc: &resource::QuerySetDescriptor, + id_in: Input, + ) -> (id::QuerySetId, Option) { + profiling::scope!("Device::create_query_set"); + + let hub = A::hub(self); + let fid = hub.query_sets.prepare(id_in); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateQuerySet { + id: fid.id(), + desc: desc.clone(), + }); + } + + let query_set = match device.create_query_set(desc) { + Ok(query_set) => query_set, + Err(err) => break err, + }; + + let (id, resource) = fid.assign(query_set); + + device + .trackers + .lock() + .query_sets + .insert_single(id, resource); + + return (id.0, None); + }; + + let id = fid.assign_error(""); + (id, Some(error)) + } + + pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { + profiling::scope!("QuerySet::drop"); + log::debug!("query set {:?} is dropped", query_set_id); + + let hub = A::hub(self); + let query_set_guard = hub.query_sets.read(); + + let query_set = { + let query_set = query_set_guard.get(query_set_id).unwrap(); + query_set + }; + + let device = &query_set.device; + + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::DestroyQuerySet(query_set_id)); + } + + device + .lock_life() + .suspected_resources + .query_sets + .push(query_set.clone()); + } + + pub fn query_set_label(&self, id: id::QuerySetId) -> String { + A::hub(self).query_sets.label_for_resource(id) + } + + pub fn device_create_render_pipeline( + &self, + device_id: DeviceId, + desc: &pipeline::RenderPipelineDescriptor, + id_in: Input, + implicit_pipeline_ids: Option>, + ) -> ( + id::RenderPipelineId, + Option, + ) { + profiling::scope!("Device::create_render_pipeline"); + + let hub = A::hub(self); + + let fid = hub.render_pipelines.prepare(id_in); + let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateRenderPipeline { + id: fid.id(), + desc: desc.clone(), + implicit_context: implicit_context.clone(), + }); + } + + let pipeline = + match device.create_render_pipeline(&adapter, desc, implicit_context, hub) { + Ok(pair) => pair, + Err(e) => break e, + }; + + let (id, resource) = fid.assign(pipeline); + log::info!("Created render pipeline {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .render_pipelines + .insert_single(id, resource); + + return (id.0, None); + }; + + let id = 
fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + /// Get an ID of one of the bind group layouts. The ID adds a refcount, + /// which needs to be released by calling `bind_group_layout_drop`. + pub fn render_pipeline_get_bind_group_layout( + &self, + pipeline_id: id::RenderPipelineId, + index: u32, + id_in: Input, + ) -> ( + id::BindGroupLayoutId, + Option, + ) { + let hub = A::hub(self); + let pipeline_layout_guard = hub.pipeline_layouts.read(); + + let error = loop { + let pipeline = match hub.render_pipelines.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, + }; + let id = match pipeline_layout_guard[pipeline.layout_id] + .bind_group_layout_ids + .get(index as usize) + { + Some(id) => id, + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; + + return (id.0, None); + }; + + let id = hub + .bind_group_layouts + .prepare(id_in) + .assign_error(""); + (id, Some(error)) + } + + pub fn render_pipeline_label(&self, id: id::RenderPipelineId) -> String { + A::hub(self).render_pipelines.label_for_resource(id) + } + + pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { + profiling::scope!("RenderPipeline::drop"); + log::debug!("render pipeline {:?} is dropped", render_pipeline_id); + let hub = A::hub(self); + let mut pipeline_guard = hub.render_pipelines.write(); + + let (pipeline, layout_id) = { + match pipeline_guard.get(render_pipeline_id) { + Ok(pipeline) => (pipeline, pipeline.layout_id), + Err(_) => { + hub.render_pipelines + .unregister_locked(render_pipeline_id, &mut *pipeline_guard); + return; + } + } + }; + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .render_pipelines + .push(pipeline.clone()); + let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); + life_lock.suspected_resources.pipeline_layouts.push(layout); + } + + pub fn device_create_compute_pipeline( + &self, + device_id: DeviceId, + desc: &pipeline::ComputePipelineDescriptor, + id_in: Input, + implicit_pipeline_ids: Option>, + ) -> ( + id::ComputePipelineId, + Option, + ) { + profiling::scope!("Device::create_compute_pipeline"); + + let hub = A::hub(self); + + let fid = hub.compute_pipelines.prepare(id_in); + let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); + + let error = loop { + let device = match hub.devices.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace.lock().add(trace::Action::CreateComputePipeline { + id: fid.id(), + desc: desc.clone(), + implicit_context: implicit_context.clone(), + }); + } + + let pipeline = match device.create_compute_pipeline(desc, implicit_context, hub) { + Ok(pair) => pair, + Err(e) => break e, + }; + + let (id, resource) = fid.assign(pipeline); + log::info!("Created compute pipeline {:?} with {:?}", id, desc); + + device + .trackers + .lock() + .compute_pipelines + .insert_single(id, resource); + return (id.0, None); + }; + + let id = fid.assign_error(desc.label.borrow_or_default()); + (id, Some(error)) + } + + /// Get an ID of one of the bind group layouts. The ID adds a refcount, + /// which needs to be released by calling `bind_group_layout_drop`. 
+ pub fn compute_pipeline_get_bind_group_layout( + &self, + pipeline_id: id::ComputePipelineId, + index: u32, + id_in: Input, + ) -> ( + id::BindGroupLayoutId, + Option, + ) { + let hub = A::hub(self); + let pipeline_layout_guard = hub.pipeline_layouts.read(); + + let error = loop { + let pipeline_guard = hub.compute_pipelines.read(); + + let pipeline = match pipeline_guard.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, + }; + let id = match pipeline_layout_guard[pipeline.layout_id] + .bind_group_layout_ids + .get(index as usize) + { + Some(id) => id, + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; + + return (id.0, None); + }; + + let id = hub + .bind_group_layouts + .prepare(id_in) + .assign_error(""); + (id, Some(error)) + } + + pub fn compute_pipeline_label(&self, id: id::ComputePipelineId) -> String { + A::hub(self).compute_pipelines.label_for_resource(id) + } + + pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { + profiling::scope!("ComputePipeline::drop"); + log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id); + let hub = A::hub(self); + let mut pipeline_guard = hub.compute_pipelines.write(); + + let (pipeline, layout_id) = { + match pipeline_guard.get(compute_pipeline_id) { + Ok(pipeline) => (pipeline, pipeline.layout_id), + Err(_) => { + hub.compute_pipelines + .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); + return; + } + } + }; + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .compute_pipelines + .push(pipeline.clone()); + let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); + life_lock.suspected_resources.pipeline_layouts.push(layout); + } + + pub fn surface_configure( + &self, + surface_id: SurfaceId, + device_id: DeviceId, + config: &wgt::SurfaceConfiguration>, + ) -> Option { + use hal::{Adapter as _, Surface as _}; + use present::ConfigureSurfaceError as E; + profiling::scope!("surface_configure"); + + fn validate_surface_configuration( + config: &mut hal::SurfaceConfiguration, + caps: &hal::SurfaceCapabilities, + ) -> Result<(), E> { + let width = config.extent.width; + let height = config.extent.height; + if width < caps.extents.start().width + || width > caps.extents.end().width + || height < caps.extents.start().height + || height > caps.extents.end().height + { + log::warn!( + "Requested size {}x{} is outside of the supported range: {:?}", + width, + height, + caps.extents + ); + } + if !caps.present_modes.contains(&config.present_mode) { + let new_mode = 'b: loop { + // Automatic present mode checks. + // + // The "Automatic" modes are never supported by the backends. + let fallbacks = match config.present_mode { + wgt::PresentMode::AutoVsync => { + &[wgt::PresentMode::FifoRelaxed, wgt::PresentMode::Fifo][..] + } + // Always end in FIFO to make sure it's always supported + wgt::PresentMode::AutoNoVsync => &[ + wgt::PresentMode::Immediate, + wgt::PresentMode::Mailbox, + wgt::PresentMode::Fifo, + ][..], + _ => { + return Err(E::UnsupportedPresentMode { + requested: config.present_mode, + available: caps.present_modes.clone(), + }); + } + }; + + for &fallback in fallbacks { + if caps.present_modes.contains(&fallback) { + break 'b fallback; + } + } + + unreachable!("Fallback system failed to choose present mode. This is a bug. 
Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes); + }; + + log::info!( + "Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}", + config.present_mode + ); + config.present_mode = new_mode; + } + if !caps.formats.contains(&config.format) { + return Err(E::UnsupportedFormat { + requested: config.format, + available: caps.formats.clone(), + }); + } + if !caps + .composite_alpha_modes + .contains(&config.composite_alpha_mode) + { + let new_alpha_mode = 'alpha: loop { + // Automatic alpha mode checks. + let fallbacks = match config.composite_alpha_mode { + wgt::CompositeAlphaMode::Auto => &[ + wgt::CompositeAlphaMode::Opaque, + wgt::CompositeAlphaMode::Inherit, + ][..], + _ => { + return Err(E::UnsupportedAlphaMode { + requested: config.composite_alpha_mode, + available: caps.composite_alpha_modes.clone(), + }); + } + }; + + for &fallback in fallbacks { + if caps.composite_alpha_modes.contains(&fallback) { + break 'alpha fallback; + } + } + + unreachable!( + "Fallback system failed to choose alpha mode. This is a bug. \ + AlphaMode: {:?}, Options: {:?}", + config.composite_alpha_mode, &caps.composite_alpha_modes + ); + }; + + log::info!( + "Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}", + config.composite_alpha_mode + ); + config.composite_alpha_mode = new_alpha_mode; + } + if !caps.usage.contains(config.usage) { + return Err(E::UnsupportedUsage); + } + if width == 0 || height == 0 { + return Err(E::ZeroArea); + } + Ok(()) + } + + log::info!("configuring surface with {:?}", config); + let hub = A::hub(self); + + let surface_guard = self.surfaces.read(); + let adapter_guard = hub.adapters.read(); + let device_guard = hub.devices.read(); + + let error = 'outer: loop { + let device = match device_guard.get(device_id) { + Ok(device) => device, + Err(_) => break DeviceError::Invalid.into(), + }; + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + trace + .lock() + .add(trace::Action::ConfigureSurface(surface_id, config.clone())); + } + + let surface = match surface_guard.get(surface_id) { + Ok(surface) => surface, + Err(_) => break E::InvalidSurface, + }; + + let caps = unsafe { + let suf = A::get_surface(&surface); + let adapter = &adapter_guard[device.adapter_id]; + match adapter + .raw + .adapter + .surface_capabilities(suf.unwrap().raw.as_ref()) + { + Some(caps) => caps, + None => break E::UnsupportedQueueFamily, + } + }; + + let mut hal_view_formats = vec![]; + for format in config.view_formats.iter() { + if *format == config.format { + continue; + } + if !caps.formats.contains(&config.format) { + break 'outer E::UnsupportedFormat { + requested: config.format, + available: caps.formats.clone(), + }; + } + if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() { + break 'outer E::InvalidViewFormat(*format, config.format); + } + hal_view_formats.push(*format); + } + + if !hal_view_formats.is_empty() { + if let Err(missing_flag) = + device.require_downlevel_flags(wgt::DownlevelFlags::SURFACE_VIEW_FORMATS) + { + break 'outer E::MissingDownlevelFlags(missing_flag); + } + } + + let num_frames = present::DESIRED_NUM_FRAMES + .clamp(*caps.swap_chain_sizes.start(), *caps.swap_chain_sizes.end()); + let mut hal_config = hal::SurfaceConfiguration { + swap_chain_size: num_frames, + present_mode: config.present_mode, + composite_alpha_mode: config.alpha_mode, + format: config.format, + extent: wgt::Extent3d { + width: config.width, + height: config.height, + depth_or_array_layers: 1, + }, + usage: 
conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR), + view_formats: hal_view_formats, + }; + + if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) { + break error; + } + + match unsafe { + A::get_surface(&surface) + .unwrap() + .raw + .configure(device.raw.as_ref().unwrap(), &hal_config) + } { + Ok(()) => (), + Err(error) => { + break match error { + hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface, + hal::SurfaceError::Device(error) => E::Device(error.into()), + hal::SurfaceError::Other(message) => { + log::error!("surface configuration failed: {}", message); + E::InvalidSurface + } + } + } + } + + if let Some(present) = surface.presentation.lock().take() { + if present.acquired_texture.is_some() { + break E::PreviousOutputExists; + } + } + let mut presentation = surface.presentation.lock(); + *presentation = Some(present::Presentation { + device_id: id::Valid(device_id), + config: config.clone(), + num_frames, + acquired_texture: None, + }); + + return None; + }; + + Some(error) + } + + #[cfg(feature = "replay")] + /// Only triage suspected resource IDs. This helps us to avoid ID collisions + /// upon creating new resources when re-playing a trace. + pub fn device_maintain_ids(&self, device_id: DeviceId) -> Result<(), InvalidDevice> { + let hub = A::hub(self); + + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + device.lock_life().triage_suspected( + hub, + &device.trackers, + #[cfg(feature = "trace")] + None, + ); + Ok(()) + } + + /// Check `device_id` for freeable resources and completed buffer mappings. + /// + /// Return `queue_empty` indicating whether there are more queue submissions still in flight. + pub fn device_poll( + &self, + device_id: DeviceId, + maintain: wgt::Maintain, + ) -> Result { + let (closures, queue_empty) = { + if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain { + if submission_index.queue_id != device_id { + return Err(WaitIdleError::WrongSubmissionIndex( + submission_index.queue_id, + device_id, + )); + } + } + + let hub = A::hub(self); + hub.devices + .get(device_id) + .map_err(|_| DeviceError::Invalid)? + .maintain(hub, maintain)? + }; + + closures.fire(); + + Ok(queue_empty) + } + + /// Poll all devices belonging to the backend `A`. + /// + /// If `force_wait` is true, block until all buffer mappings are done. + /// + /// Return `all_queue_empty` indicating whether there are more queue + /// submissions still in flight. + fn poll_devices( + &self, + force_wait: bool, + closures: &mut UserClosures, + ) -> Result { + profiling::scope!("poll_devices"); + + let hub = A::hub(self); + let mut devices_to_drop = vec![]; + let mut all_queue_empty = true; + { + let device_guard = hub.devices.read(); + + for (id, device) in device_guard.iter(A::VARIANT) { + let maintain = if force_wait { + wgt::Maintain::Wait + } else { + wgt::Maintain::Poll + }; + let (cbs, queue_empty) = device.maintain(hub, maintain)?; + all_queue_empty = all_queue_empty && queue_empty; + + // If the device's own `RefCount` is the only one left, and + // its submission queue is empty, then it can be freed. + if queue_empty && device.is_unique() { + devices_to_drop.push(id); + } + closures.extend(cbs); + } + } + + for device_id in devices_to_drop { + self.exit_device::(device_id); + } + + Ok(all_queue_empty) + } + + /// Poll all devices on all backends. + /// + /// This is the implementation of `wgpu::Instance::poll_all`. 
+ /// + /// Return `all_queue_empty` indicating whether there are more queue + /// submissions still in flight. + pub fn poll_all_devices(&self, force_wait: bool) -> Result { + let mut closures = UserClosures::default(); + let mut all_queue_empty = true; + + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + { + all_queue_empty = self.poll_devices::(force_wait, &mut closures)? + && all_queue_empty; + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + { + all_queue_empty = + self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + } + #[cfg(all(feature = "dx12", windows))] + { + all_queue_empty = + self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + } + #[cfg(all(feature = "dx11", windows))] + { + all_queue_empty = + self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + } + #[cfg(feature = "gles")] + { + all_queue_empty = + self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + } + + closures.fire(); + + Ok(all_queue_empty) + } + + pub fn device_label(&self, id: DeviceId) -> String { + A::hub(self).devices.label_for_resource(id) + } + + pub fn device_start_capture(&self, id: DeviceId) { + let hub = A::hub(self); + if let Ok(device) = hub.devices.get(id) { + unsafe { device.raw.as_ref().unwrap().start_capture() }; + } + } + + pub fn device_stop_capture(&self, id: DeviceId) { + let hub = A::hub(self); + if let Ok(device) = hub.devices.get(id) { + unsafe { device.raw.as_ref().unwrap().stop_capture() }; + } + } + + pub fn device_drop(&self, device_id: DeviceId) { + profiling::scope!("Device::drop"); + log::debug!("device {:?} is dropped", device_id); + } + + /// Exit the unreferenced, inactive device `device_id`. + fn exit_device(&self, device_id: DeviceId) { + let hub = A::hub(self); + let mut free_adapter_id = None; + { + let device = hub.devices.unregister(device_id); + if let Some(device) = device { + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. We know we were + // just polled, so `life_tracker.free_resources` is empty. + debug_assert!(device.lock_life().queue_empty()); + device.pending_writes.lock().as_mut().unwrap().deactivate(); + + let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); + // Adapter is only referenced by the device and itself. + // This isn't a robust way to destroy them, we should find a better one. + if adapter.is_unique() { + free_adapter_id = Some(device.adapter_id.0); + } + + drop(device); + } + } + + // Free the adapter now that we've dropped the `Device`. + if let Some(free_adapter_id) = free_adapter_id { + let _ = hub.adapters.unregister(free_adapter_id); + } + } + + pub fn buffer_map_async( + &self, + buffer_id: id::BufferId, + range: Range, + op: BufferMapOperation, + ) -> BufferAccessResult { + // User callbacks must not be called while holding buffer_map_async_inner's locks, so we + // defer the error callback if it needs to be called immediately (typically when running + // into errors). + if let Err((op, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { + op.callback.call(Err(err.clone())); + + return Err(err); + } + + Ok(()) + } + + // Returns the mapping callback in case of error so that the callback can be fired outside + // of the locks that are held in this function. 
+ fn buffer_map_async_inner( + &self, + buffer_id: id::BufferId, + range: Range, + op: BufferMapOperation, + ) -> Result<(), (BufferMapOperation, BufferAccessError)> { + profiling::scope!("Buffer::map_async"); + + let hub = A::hub(self); + let buffer_guard = hub.buffers.read(); + + let (pub_usage, internal_use) = match op.host { + HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ), + HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE), + }; + + if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 { + return Err((op, BufferAccessError::UnalignedRange)); + } + + let (device, buffer) = { + let buffer = buffer_guard + .get(buffer_id) + .map_err(|_| BufferAccessError::Invalid); + + let buffer = match buffer { + Ok(b) => b, + Err(e) => { + return Err((op, e)); + } + }; + + if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) { + return Err((op, e.into())); + } + + if range.start > range.end { + return Err(( + op, + BufferAccessError::NegativeRange { + start: range.start, + end: range.end, + }, + )); + } + if range.end > buffer.size { + return Err(( + op, + BufferAccessError::OutOfBoundsOverrun { + index: range.end, + max: buffer.size, + }, + )); + } + let mut map_state = buffer.map_state.lock(); + *map_state = match *map_state { + resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => { + return Err((op, BufferAccessError::AlreadyMapped)); + } + resource::BufferMapState::Waiting(_) => { + return Err((op, BufferAccessError::MapAlreadyPending)); + } + resource::BufferMapState::Idle => { + resource::BufferMapState::Waiting(resource::BufferPendingMapping { + range, + op, + _parent_buffer: buffer.clone(), + }) + } + }; + log::debug!("Buffer {:?} map state -> Waiting", buffer_id); + + let device = &buffer.device; + + { + let mut trackers = device.as_ref().trackers.lock(); + + trackers + .buffers + .set_single(&*buffer_guard, buffer_id, internal_use); + trackers.buffers.drain(); + } + + (device, buffer) + }; + + device.lock_life().map(buffer); + + Ok(()) + } + + pub fn buffer_get_mapped_range( + &self, + buffer_id: id::BufferId, + offset: BufferAddress, + size: Option, + ) -> Result<(*mut u8, u64), BufferAccessError> { + profiling::scope!("Buffer::get_mapped_range"); + + let hub = A::hub(self); + + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + + let range_size = if let Some(size) = size { + size + } else if offset > buffer.size { + 0 + } else { + buffer.size - offset + }; + + if offset % wgt::MAP_ALIGNMENT != 0 { + return Err(BufferAccessError::UnalignedOffset { offset }); + } + if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 { + return Err(BufferAccessError::UnalignedRangeSize { range_size }); + } + let map_state = &*buffer.map_state.lock(); + match map_state { + resource::BufferMapState::Init { ptr, .. } => { + // offset (u64) can not be < 0, so no need to validate the lower bound + if offset + range_size > buffer.size { + return Err(BufferAccessError::OutOfBoundsOverrun { + index: offset + range_size - 1, + max: buffer.size, + }); + } + unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) } + } + resource::BufferMapState::Active { ptr, ref range, .. 
} => { + if offset < range.start { + return Err(BufferAccessError::OutOfBoundsUnderrun { + index: offset, + min: range.start, + }); + } + if offset + range_size > range.end { + return Err(BufferAccessError::OutOfBoundsOverrun { + index: offset + range_size - 1, + max: range.end, + }); + } + // ptr points to the beginning of the range we mapped in map_async + // rather than the beginning of the buffer. + let relative_offset = (offset - range.start) as isize; + unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) } + } + resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => { + Err(BufferAccessError::NotMapped) + } + } + } + + fn buffer_unmap_inner( + &self, + buffer_id: id::BufferId, + buffer: &Arc>, + device: &Device, + ) -> Result, BufferAccessError> { + log::debug!("Buffer {:?} map state -> Idle", buffer_id); + match mem::replace( + &mut *buffer.map_state.lock(), + resource::BufferMapState::Idle, + ) { + resource::BufferMapState::Init { + ptr, + stage_buffer, + needs_flush, + } => { + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: 0..buffer.size, + queued: true, + }); + } + let _ = ptr; + if needs_flush { + unsafe { + device.raw.as_ref().unwrap().flush_mapped_ranges( + stage_buffer.raw.as_ref().unwrap(), + iter::once(0..buffer.size), + ); + } + } + + let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; + + buffer.info.use_at( + device + .active_submission_index + .fetch_add(1, Ordering::Relaxed), + ); + let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { + src_offset: 0, + dst_offset: 0, + size, + }); + let transition_src = hal::BufferBarrier { + buffer: stage_buffer.raw.as_ref().unwrap().as_ref(), + usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, + }; + let transition_dst = hal::BufferBarrier { + buffer: raw_buf.as_ref(), + usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, + }; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); + unsafe { + encoder.transition_buffers( + iter::once(transition_src).chain(iter::once(transition_dst)), + ); + if buffer.size > 0 { + encoder.copy_buffer_to_buffer( + stage_buffer.raw.as_ref().unwrap().as_ref(), + raw_buf, + region.into_iter(), + ); + } + } + pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer)); + pending_writes.dst_buffers.insert(buffer_id, buffer.clone()); + } + resource::BufferMapState::Idle => { + return Err(BufferAccessError::NotMapped); + } + resource::BufferMapState::Waiting(pending) => { + return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); + } + resource::BufferMapState::Active { ptr, range, host } => { + if host == HostMap::Write { + #[cfg(feature = "trace")] + if let Some(ref trace) = device.trace { + let mut trace = trace.lock(); + let size = range.end - range.start; + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: range.clone(), + queued: false, + }); + } + let _ = (ptr, range); + } + unsafe { + device + .raw + .as_ref() + .unwrap() + .unmap_buffer(buffer.raw.as_ref().unwrap()) + .map_err(DeviceError::from)? 
+ }; + } + } + Ok(None) + } + + pub fn buffer_unmap(&self, buffer_id: id::BufferId) -> BufferAccessResult { + profiling::scope!("unmap", "Buffer"); + + let closure; + { + // Restrict the locks to this scope. + let hub = A::hub(self); + + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| BufferAccessError::Invalid)?; + let device = &buffer.device; + + closure = self.buffer_unmap_inner(buffer_id, &buffer, &device) + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((operation, status)) = closure? { + operation.callback.call(status); + } + Ok(()) + } +} diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 605aea3dab..79f8344ee7 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -1,41 +1,60 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ + binding_model::{BindGroup, BindGroupLayout, PipelineLayout}, + command::RenderBundle, device::{ queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource}, DeviceError, }, - hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Token}, - id, resource, + hal_api::HalApi, + hub::Hub, + id, + identity::GlobalIdentityHandlerFactory, + pipeline::{ComputePipeline, RenderPipeline}, + resource::{self, Buffer, QuerySet, Resource, Sampler, StagingBuffer, Texture, TextureView}, track::{BindGroupStates, RenderBundleScope, Tracker}, - RefCount, Stored, SubmissionIndex, + SubmissionIndex, }; use smallvec::SmallVec; -use hal::Device as _; use parking_lot::Mutex; use thiserror::Error; -use std::mem; +use std::{mem, sync::Arc}; /// A struct that keeps lists of resources that are no longer needed by the user. -#[derive(Debug, Default)] -pub(super) struct SuspectedResources { - pub(super) buffers: Vec>, - pub(super) textures: Vec>, - pub(super) texture_views: Vec>, - pub(super) samplers: Vec>, - pub(super) bind_groups: Vec>, - pub(super) compute_pipelines: Vec>, - pub(super) render_pipelines: Vec>, - pub(super) bind_group_layouts: Vec>, - pub(super) pipeline_layouts: Vec>, - pub(super) render_bundles: Vec>, - pub(super) query_sets: Vec>, +pub(crate) struct SuspectedResources { + pub(crate) buffers: Vec>>, + pub(crate) textures: Vec>>, + pub(crate) texture_views: Vec>>, + pub(crate) samplers: Vec>>, + pub(crate) bind_groups: Vec>>, + pub(crate) compute_pipelines: Vec>>, + pub(crate) render_pipelines: Vec>>, + pub(crate) bind_group_layouts: Vec>>, + pub(crate) pipeline_layouts: Vec>>, + pub(crate) render_bundles: Vec>>, + pub(crate) query_sets: Vec>>, } -impl SuspectedResources { - pub(super) fn clear(&mut self) { +impl SuspectedResources { + pub(crate) fn new() -> Self { + Self { + buffers: Vec::new(), + textures: Vec::new(), + texture_views: Vec::new(), + samplers: Vec::new(), + bind_groups: Vec::new(), + compute_pipelines: Vec::new(), + render_pipelines: Vec::new(), + bind_group_layouts: Vec::new(), + pipeline_layouts: Vec::new(), + render_bundles: Vec::new(), + query_sets: Vec::new(), + } + } + pub(crate) fn clear(&mut self) { self.buffers.clear(); self.textures.clear(); self.texture_views.clear(); @@ -49,7 +68,7 @@ impl SuspectedResources { self.query_sets.clear(); } - pub(super) fn extend(&mut self, other: &Self) { + pub(crate) fn extend(&mut self, other: &Self) { self.buffers.extend_from_slice(&other.buffers); self.textures.extend_from_slice(&other.textures); self.texture_views.extend_from_slice(&other.texture_views); @@ -67,42 +86,97 @@ impl SuspectedResources { self.query_sets.extend_from_slice(&other.query_sets); } - pub(super) fn 
add_render_bundle_scope(&mut self, trackers: &RenderBundleScope) { - self.buffers.extend(trackers.buffers.used()); - self.textures.extend(trackers.textures.used()); - self.bind_groups.extend(trackers.bind_groups.used()); - self.render_pipelines - .extend(trackers.render_pipelines.used()); - self.query_sets.extend(trackers.query_sets.used()); + pub(crate) fn add_render_bundle_scope(&mut self, trackers: &RenderBundleScope) { + self.buffers.extend( + trackers + .buffers + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.textures.extend( + trackers + .textures + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.bind_groups.extend( + trackers + .bind_groups + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.render_pipelines.extend( + trackers + .render_pipelines + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.query_sets.extend( + trackers + .query_sets + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); } - pub(super) fn add_bind_group_states(&mut self, trackers: &BindGroupStates) { - self.buffers.extend(trackers.buffers.used()); - self.textures.extend(trackers.textures.used()); - self.texture_views.extend(trackers.views.used()); - self.samplers.extend(trackers.samplers.used()); + pub(crate) fn add_bind_group_states(&mut self, trackers: &BindGroupStates) { + self.buffers.extend( + trackers + .buffers + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.textures.extend( + trackers + .textures + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.texture_views.extend( + trackers + .views + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); + self.samplers.extend( + trackers + .samplers + .used_resources() + .map(|r| r.clone()) + .into_iter(), + ); } } /// Raw backend resources that should be freed shortly. #[derive(Debug)] -struct NonReferencedResources { - buffers: Vec, - textures: Vec, - texture_views: Vec, - samplers: Vec, - bind_groups: Vec, - compute_pipes: Vec, - render_pipes: Vec, - bind_group_layouts: Vec, - pipeline_layouts: Vec, - query_sets: Vec, +struct NonReferencedResources { + buffers: Vec>>, + staging_buffers: Vec>>, + textures: Vec>>, + texture_views: Vec>>, + samplers: Vec>>, + bind_groups: Vec>>, + compute_pipes: Vec>>, + render_pipes: Vec>>, + bind_group_layouts: Vec>>, + pipeline_layouts: Vec>>, + query_sets: Vec>>, } -impl NonReferencedResources { +impl NonReferencedResources { fn new() -> Self { Self { buffers: Vec::new(), + staging_buffers: Vec::new(), textures: Vec::new(), texture_views: Vec::new(), samplers: Vec::new(), @@ -117,6 +191,7 @@ impl NonReferencedResources { fn extend(&mut self, other: Self) { self.buffers.extend(other.buffers); + self.staging_buffers.extend(other.staging_buffers); self.textures.extend(other.textures); self.texture_views.extend(other.texture_views); self.samplers.extend(other.samplers); @@ -128,72 +203,56 @@ impl NonReferencedResources { assert!(other.pipeline_layouts.is_empty()); } - unsafe fn clean(&mut self, device: &A::Device) { + unsafe fn clean(&mut self) { if !self.buffers.is_empty() { profiling::scope!("destroy_buffers"); - for raw in self.buffers.drain(..) { - unsafe { device.destroy_buffer(raw) }; - } + self.buffers.clear(); + } + if !self.staging_buffers.is_empty() { + profiling::scope!("destroy_staging_buffers"); + self.staging_buffers.clear(); } if !self.textures.is_empty() { profiling::scope!("destroy_textures"); - for raw in self.textures.drain(..) 
{ - unsafe { device.destroy_texture(raw) }; - } + self.textures.clear(); } if !self.texture_views.is_empty() { profiling::scope!("destroy_texture_views"); - for raw in self.texture_views.drain(..) { - unsafe { device.destroy_texture_view(raw) }; - } + self.texture_views.clear(); } if !self.samplers.is_empty() { profiling::scope!("destroy_samplers"); - for raw in self.samplers.drain(..) { - unsafe { device.destroy_sampler(raw) }; - } + self.samplers.clear(); } if !self.bind_groups.is_empty() { profiling::scope!("destroy_bind_groups"); - for raw in self.bind_groups.drain(..) { - unsafe { device.destroy_bind_group(raw) }; - } + self.bind_groups.clear(); } if !self.compute_pipes.is_empty() { profiling::scope!("destroy_compute_pipelines"); - for raw in self.compute_pipes.drain(..) { - unsafe { device.destroy_compute_pipeline(raw) }; - } + self.compute_pipes.clear(); } if !self.render_pipes.is_empty() { profiling::scope!("destroy_render_pipelines"); - for raw in self.render_pipes.drain(..) { - unsafe { device.destroy_render_pipeline(raw) }; - } + self.render_pipes.clear(); } if !self.bind_group_layouts.is_empty() { profiling::scope!("destroy_bind_group_layouts"); - for raw in self.bind_group_layouts.drain(..) { - unsafe { device.destroy_bind_group_layout(raw) }; - } + self.bind_group_layouts.clear(); } if !self.pipeline_layouts.is_empty() { profiling::scope!("destroy_pipeline_layouts"); - for raw in self.pipeline_layouts.drain(..) { - unsafe { device.destroy_pipeline_layout(raw) }; - } + self.pipeline_layouts.clear(); } if !self.query_sets.is_empty() { profiling::scope!("destroy_query_sets"); - for raw in self.query_sets.drain(..) { - unsafe { device.destroy_query_set(raw) }; - } + self.query_sets.clear(); } } } /// Resources used by a queue submission, and work to be done once it completes. -struct ActiveSubmission { +struct ActiveSubmission { /// The index of the submission we track. /// /// When `Device::fence`'s value is greater than or equal to this, our queue @@ -213,7 +272,7 @@ struct ActiveSubmission { last_resources: NonReferencedResources, /// Buffers to be mapped once this submission has completed. - mapped: Vec>, + mapped: Vec>>, encoders: Vec>, work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>, @@ -266,21 +325,21 @@ pub enum WaitIdleError { /// /// Only `self.mapped` holds a `RefCount` for the buffer; it is dropped by /// `triage_mapped`. -pub(super) struct LifetimeTracker { +pub(crate) struct LifetimeTracker { /// Resources that the user has requested be mapped, but which are used by /// queue submissions still in flight. - mapped: Vec>, + mapped: Vec>>, /// Buffers can be used in a submission that is yet to be made, by the /// means of `write_buffer()`, so we have a special place for them. - pub future_suspected_buffers: Vec>, + pub future_suspected_buffers: Vec>>, /// Textures can be used in the upcoming submission by `write_texture`. - pub future_suspected_textures: Vec>, + pub future_suspected_textures: Vec>>, /// Resources whose user handle has died (i.e. drop/destroy has been called) /// and will likely be ready for destruction soon. - pub suspected_resources: SuspectedResources, + pub suspected_resources: SuspectedResources, /// Resources used by queue submissions still in flight. One entry per /// submission, with older submissions appearing before younger. @@ -299,16 +358,16 @@ pub(super) struct LifetimeTracker { /// Buffers the user has asked us to map, and which are not used by any /// queue submission still in flight. 
- ready_to_map: Vec>, + ready_to_map: Vec>>, } -impl LifetimeTracker { +impl LifetimeTracker { pub fn new() -> Self { Self { mapped: Vec::new(), future_suspected_buffers: Vec::new(), future_suspected_textures: Vec::new(), - suspected_resources: SuspectedResources::default(), + suspected_resources: SuspectedResources::new(), active: Vec::new(), free_resources: NonReferencedResources::new(), ready_to_map: Vec::new(), @@ -331,6 +390,7 @@ impl LifetimeTracker { for res in temp_resources { match res { TempResource::Buffer(raw) => last_resources.buffers.push(raw), + TempResource::StagingBuffer(raw) => last_resources.staging_buffers.push(raw), TempResource::Texture(raw, views) => { last_resources.textures.push(raw); last_resources.texture_views.extend(views); @@ -348,20 +408,18 @@ impl LifetimeTracker { } pub fn post_submit(&mut self) { - self.suspected_resources.buffers.extend( - self.future_suspected_buffers - .drain(..) - .map(|stored| stored.value), - ); + self.suspected_resources + .buffers + .extend(self.future_suspected_buffers.drain(..).map(|stored| stored)); self.suspected_resources.textures.extend( self.future_suspected_textures .drain(..) - .map(|stored| stored.value), + .map(|stored| stored), ); } - pub(crate) fn map(&mut self, value: id::Valid, ref_count: RefCount) { - self.mapped.push(Stored { value, ref_count }); + pub(crate) fn map(&mut self, value: &Arc>) { + self.mapped.push(value.clone()); } /// Sort out the consequences of completed submissions. @@ -389,7 +447,7 @@ impl LifetimeTracker { pub fn triage_submissions( &mut self, last_done: SubmissionIndex, - command_allocator: &Mutex>, + command_allocator: &mut super::CommandAllocator, ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> { profiling::scope!("triage_submissions"); @@ -408,17 +466,17 @@ impl LifetimeTracker { self.ready_to_map.extend(a.mapped); for encoder in a.encoders { let raw = unsafe { encoder.land() }; - command_allocator.lock().release_encoder(raw); + command_allocator.release_encoder(raw); } work_done_closures.extend(a.work_done_closures); } work_done_closures } - pub fn cleanup(&mut self, device: &A::Device) { + pub fn cleanup(&mut self) { profiling::scope!("LifetimeTracker::cleanup"); unsafe { - self.free_resources.clean(device); + self.free_resources.clean(); } } @@ -434,6 +492,7 @@ impl LifetimeTracker { .map_or(&mut self.free_resources, |a| &mut a.last_resources); match temp_resource { TempResource::Buffer(raw) => resources.buffers.push(raw), + TempResource::StagingBuffer(raw) => resources.staging_buffers.push(raw), TempResource::Texture(raw, views) => { resources.texture_views.extend(views); resources.textures.push(raw); @@ -497,28 +556,31 @@ impl LifetimeTracker { /// [`self.active`]: LifetimeTracker::active /// [`triage_submissions`]: LifetimeTracker::triage_submissions /// [`self.free_resources`]: LifetimeTracker::free_resources - pub(super) fn triage_suspected( + pub(crate) fn triage_suspected( &mut self, hub: &Hub, trackers: &Mutex>, #[cfg(feature = "trace")] trace: Option<&Mutex>, - token: &mut Token>, ) { profiling::scope!("triage_suspected"); if !self.suspected_resources.render_bundles.is_empty() { - let (mut guard, _) = hub.render_bundles.write(token); + let mut render_bundles_locked = hub.render_bundles.write(); let mut trackers = trackers.lock(); - while let Some(id) = self.suspected_resources.render_bundles.pop() { + while let Some(bundle) = self.suspected_resources.render_bundles.pop() { + let id = bundle.info.id(); if trackers.bundles.remove_abandoned(id) { log::debug!("Bundle {:?} will be 
destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyRenderBundle(id.0)); + t.lock().add(trace::Action::DestroyRenderBundle(id)); } - if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) { + if let Some(res) = hub + .render_bundles + .unregister_locked(id.0, &mut *render_bundles_locked) + { self.suspected_resources.add_render_bundle_scope(&res.used); } } @@ -526,58 +588,68 @@ impl LifetimeTracker { } if !self.suspected_resources.bind_groups.is_empty() { - let (mut guard, _) = hub.bind_groups.write(token); + let mut bind_groups_locked = hub.bind_groups.write(); let mut trackers = trackers.lock(); - while let Some(id) = self.suspected_resources.bind_groups.pop() { + while let Some(resource) = self.suspected_resources.bind_groups.pop() { + let id = resource.info.id(); if trackers.bind_groups.remove_abandoned(id) { log::debug!("Bind group {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBindGroup(id.0)); + t.lock().add(trace::Action::DestroyBindGroup(id)); } - if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) { + if let Some(res) = hub + .bind_groups + .unregister_locked(id.0, &mut *bind_groups_locked) + { self.suspected_resources.add_bind_group_states(&res.used); - + let bind_group_layout = + hub.bind_group_layouts.get(res.layout_id.0).unwrap(); self.suspected_resources .bind_group_layouts - .push(res.layout_id); + .push(bind_group_layout); - let submit_index = res.life_guard.life_count(); + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .bind_groups - .push(res.raw); + .push(res); } } } } if !self.suspected_resources.texture_views.is_empty() { - let (mut guard, _) = hub.texture_views.write(token); + let mut texture_views_locked = hub.texture_views.write(); let mut trackers = trackers.lock(); let mut list = mem::take(&mut self.suspected_resources.texture_views); - for id in list.drain(..) { + for texture_view in list.drain(..) { + let id = texture_view.info.id(); if trackers.views.remove_abandoned(id) { log::debug!("Texture view {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTextureView(id.0)); + t.lock().add(trace::Action::DestroyTextureView(id)); } - if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) { - self.suspected_resources.textures.push(res.parent_id.value); - let submit_index = res.life_guard.life_count(); + if let Some(res) = hub + .texture_views + .unregister_locked(id.0, &mut *texture_views_locked) + { + let parent_texture = hub.textures.get(res.parent_id.0).unwrap(); + self.suspected_resources.textures.push(parent_texture); + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .texture_views - .push(res.raw); + .push(res); } } } @@ -585,209 +657,227 @@ impl LifetimeTracker { } if !self.suspected_resources.textures.is_empty() { - let (mut guard, _) = hub.textures.write(token); + let mut textures_locked = hub.textures.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.textures.drain(..) { + for texture in self.suspected_resources.textures.drain(..) 
{ + let id = texture.info.id(); if trackers.textures.remove_abandoned(id) { log::debug!("Texture {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTexture(id.0)); + t.lock().add(trace::Action::DestroyTexture(id)); } - if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - let raw = match res.inner { - resource::TextureInner::Native { raw: Some(raw) } => raw, - _ => continue, - }; + if let Some(res) = hub.textures.unregister_locked(id.0, &mut *textures_locked) { + let submit_index = res.info.submission_index(); let non_referenced_resources = self .active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); - non_referenced_resources.textures.push(raw); if let resource::TextureClearMode::RenderPass { clear_views, .. } = - res.clear_mode + &*res.clear_mode.read() { non_referenced_resources .texture_views - .extend(clear_views.into_iter()); + .extend(clear_views.iter().cloned().into_iter()); } + non_referenced_resources.textures.push(res); } } } } if !self.suspected_resources.samplers.is_empty() { - let (mut guard, _) = hub.samplers.write(token); + let mut samplers_locked = hub.samplers.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.samplers.drain(..) { + for sampler in self.suspected_resources.samplers.drain(..) { + let id = sampler.info.id(); if trackers.samplers.remove_abandoned(id) { log::debug!("Sampler {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroySampler(id.0)); + t.lock().add(trace::Action::DestroySampler(id)); } - if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); + if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *samplers_locked) { + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .samplers - .push(res.raw); + .push(res); } } } } if !self.suspected_resources.buffers.is_empty() { - let (mut guard, _) = hub.buffers.write(token); + let mut buffers_locked = hub.buffers.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.buffers.drain(..) { + for buffer in self.suspected_resources.buffers.drain(..) { + let id = buffer.info.id(); if trackers.buffers.remove_abandoned(id) { log::debug!("Buffer {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBuffer(id.0)); + t.lock().add(trace::Action::DestroyBuffer(id)); } - if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - if let resource::BufferMapState::Init { stage_buffer, .. } = res.map_state { - self.free_resources.buffers.push(stage_buffer); + if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *buffers_locked) { + let submit_index = res.info.submission_index(); + if let resource::BufferMapState::Init { stage_buffer, .. 
} = + &*res.map_state.lock() + { + self.free_resources.buffers.push(stage_buffer.clone()); } self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .buffers - .extend(res.raw); + .push(res); } } } } if !self.suspected_resources.compute_pipelines.is_empty() { - let (mut guard, _) = hub.compute_pipelines.write(token); + let mut compute_pipelines_locked = hub.compute_pipelines.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.compute_pipelines.drain(..) { + for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) { + let id = compute_pipeline.info.id(); if trackers.compute_pipelines.remove_abandoned(id) { log::debug!("Compute pipeline {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyComputePipeline(id.0)); + t.lock().add(trace::Action::DestroyComputePipeline(id)); } - if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); + if let Some(res) = hub + .compute_pipelines + .unregister_locked(id.0, &mut *compute_pipelines_locked) + { + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .compute_pipes - .push(res.raw); + .push(res); } } } } if !self.suspected_resources.render_pipelines.is_empty() { - let (mut guard, _) = hub.render_pipelines.write(token); + let mut render_pipelines_locked = hub.render_pipelines.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.render_pipelines.drain(..) { + for render_pipeline in self.suspected_resources.render_pipelines.drain(..) { + let id = render_pipeline.info.id(); if trackers.render_pipelines.remove_abandoned(id) { log::debug!("Render pipeline {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyRenderPipeline(id.0)); + t.lock().add(trace::Action::DestroyRenderPipeline(id)); } - if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); + if let Some(res) = hub + .render_pipelines + .unregister_locked(id.0, &mut *render_pipelines_locked) + { + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .render_pipes - .push(res.raw); + .push(res); } } } } if !self.suspected_resources.pipeline_layouts.is_empty() { - let (mut guard, _) = hub.pipeline_layouts.write(token); + let mut pipeline_layouts_locked = hub.pipeline_layouts.write(); - for Stored { - value: id, - ref_count, - } in self.suspected_resources.pipeline_layouts.drain(..) - { + for pipeline_layout in self.suspected_resources.pipeline_layouts.drain(..) 
{ + let id = pipeline_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed - if ref_count.load() == 1 { + if pipeline_layouts_locked.is_unique(id.0).unwrap() { log::debug!("Pipeline layout {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyPipelineLayout(id.0)); + t.lock().add(trace::Action::DestroyPipelineLayout(id)); } - if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) { - self.suspected_resources - .bind_group_layouts - .extend_from_slice(&lay.bind_group_layout_ids); - self.free_resources.pipeline_layouts.push(lay.raw); + if let Some(lay) = hub + .pipeline_layouts + .unregister_locked(id.0, &mut *pipeline_layouts_locked) + { + for bgl_id in &lay.bind_group_layout_ids { + let bgl = hub.bind_group_layouts.get(bgl_id.0).unwrap(); + self.suspected_resources.bind_group_layouts.push(bgl); + } + self.free_resources.pipeline_layouts.push(lay); } } } } if !self.suspected_resources.bind_group_layouts.is_empty() { - let (mut guard, _) = hub.bind_group_layouts.write(token); + let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); - for id in self.suspected_resources.bind_group_layouts.drain(..) { + for bgl in self.suspected_resources.bind_group_layouts.drain(..) { + let id = bgl.info().id(); //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. - if guard[id].multi_ref_count.dec_and_check_empty() { + if bind_group_layouts_locked.is_unique(id.0).unwrap() { log::debug!("Bind group layout {:?} will be destroyed", id); #[cfg(feature = "trace")] if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)); + t.lock().add(trace::Action::DestroyBindGroupLayout(id)); } - if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) { - self.free_resources.bind_group_layouts.push(lay.raw); + if let Some(lay) = hub + .bind_group_layouts + .unregister_locked(id.0, &mut *bind_group_layouts_locked) + { + self.free_resources.bind_group_layouts.push(lay); } } } } if !self.suspected_resources.query_sets.is_empty() { - let (mut guard, _) = hub.query_sets.write(token); + let mut query_sets_locked = hub.query_sets.write(); let mut trackers = trackers.lock(); - for id in self.suspected_resources.query_sets.drain(..) { + for query_set in self.suspected_resources.query_sets.drain(..) { + let id = query_set.info.id(); if trackers.query_sets.remove_abandoned(id) { log::debug!("Query set {:?} will be destroyed", id); // #[cfg(feature = "trace")] // trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0))); - if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); + if let Some(res) = hub + .query_sets + .unregister_locked(id.0, &mut *query_sets_locked) + { + let submit_index = res.info.submission_index(); self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .query_sets - .push(res.raw); + .push(res); } } } @@ -798,24 +888,16 @@ impl LifetimeTracker { /// GPU. /// /// See the documentation for [`LifetimeTracker`] for details. 
- pub(super) fn triage_mapped( - &mut self, - hub: &Hub, - token: &mut Token>, - ) { + pub(crate) fn triage_mapped(&mut self) { if self.mapped.is_empty() { return; } - let (buffer_guard, _) = hub.buffers.read(token); - - for stored in self.mapped.drain(..) { - let resource_id = stored.value; - let buf = &buffer_guard[resource_id]; - let submit_index = buf.life_guard.life_count(); + for buffer in self.mapped.drain(..) { + let submit_index = buffer.info.submission_index(); log::trace!( "Mapping of {:?} at submission {:?} gets assigned to active {:?}", - resource_id, + buffer.info.id(), submit_index, self.active.iter().position(|a| a.index == submit_index) ); @@ -824,7 +906,7 @@ impl LifetimeTracker { .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.ready_to_map, |a| &mut a.mapped) - .push(resource_id); + .push(buffer); } } @@ -834,35 +916,33 @@ impl LifetimeTracker { /// /// See the documentation for [`LifetimeTracker`] for details. #[must_use] - pub(super) fn handle_mapping( + pub(crate) fn handle_mapping( &mut self, hub: &Hub, raw: &A::Device, trackers: &Mutex>, - token: &mut Token>, ) -> Vec { if self.ready_to_map.is_empty() { return Vec::new(); } - let (mut buffer_guard, _) = hub.buffers.write(token); + let mut buffers_locked = hub.buffers.write(); let mut pending_callbacks: Vec = Vec::with_capacity(self.ready_to_map.len()); let mut trackers = trackers.lock(); - for buffer_id in self.ready_to_map.drain(..) { - let buffer = &mut buffer_guard[buffer_id]; - if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id) - { - buffer.map_state = resource::BufferMapState::Idle; + for buffer in self.ready_to_map.drain(..) { + let buffer_id = buffer.info.id(); + if buffer.is_unique() && trackers.buffers.remove_abandoned(buffer_id) { + *buffer.map_state.lock() = resource::BufferMapState::Idle; log::debug!("Mapping request is dropped because the buffer is destroyed."); if let Some(buf) = hub .buffers - .unregister_locked(buffer_id.0, &mut *buffer_guard) + .unregister_locked(buffer_id.0, &mut *buffers_locked) { - self.free_resources.buffers.extend(buf.raw); + self.free_resources.buffers.push(buf); } } else { let mapping = match std::mem::replace( - &mut buffer.map_state, + &mut *buffer.map_state.lock(), resource::BufferMapState::Idle, ) { resource::BufferMapState::Waiting(pending_mapping) => pending_mapping, @@ -871,7 +951,7 @@ impl LifetimeTracker { // Mapping queued at least twice by map -> unmap -> map // and was already successfully mapped below active @ resource::BufferMapState::Active { .. 
} => { - buffer.map_state = active; + *buffer.map_state.lock() = active; continue; } _ => panic!("No pending mapping."), @@ -880,9 +960,9 @@ impl LifetimeTracker { log::debug!("Buffer {:?} map state -> Active", buffer_id); let host = mapping.op.host; let size = mapping.range.end - mapping.range.start; - match super::map_buffer(raw, buffer, mapping.range.start, size, host) { + match super::map_buffer(raw, &buffer, mapping.range.start, size, host) { Ok(ptr) => { - buffer.map_state = resource::BufferMapState::Active { + *buffer.map_state.lock() = resource::BufferMapState::Active { ptr, range: mapping.range.start..mapping.range.start + size, host, @@ -895,7 +975,7 @@ impl LifetimeTracker { } } } else { - buffer.map_state = resource::BufferMapState::Active { + *buffer.map_state.lock() = resource::BufferMapState::Active { ptr: std::ptr::NonNull::dangling(), range: mapping.range, host: mapping.op.host, diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 1b35d862e9..60fab2777f 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -1,35 +1,30 @@ use crate::{ - binding_model, command, conv, + binding_model, device::life::WaitIdleError, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Hub, Input, InvalidId, Storage, Token}, - id, - init_tracker::{ - BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, - TextureInitTracker, TextureInitTrackerAction, - }, - instance::{self, Adapter, Surface}, - pipeline, present, - resource::{self, BufferAccessResult, BufferMapState, TextureViewNotRenderableReason}, + hal_api::HalApi, + hub::Hub, + id::{self}, + identity::{GlobalIdentityHandlerFactory, Input}, + resource::{Buffer, BufferAccessResult}, resource::{BufferAccessError, BufferMapOperation}, - track::{BindGroupStates, TextureSelector, Tracker}, - validation::{self, check_buffer_usage, check_texture_usage}, - FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored, - SubmissionIndex, DOWNLEVEL_ERROR_MESSAGE, + Label, DOWNLEVEL_ERROR_MESSAGE, }; use arrayvec::ArrayVec; -use hal::{CommandEncoder as _, Device as _}; -use parking_lot::{Mutex, MutexGuard}; +use hal::Device as _; use smallvec::SmallVec; use thiserror::Error; -use wgt::{BufferAddress, TextureFormat, TextureViewDimension}; +use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, ptr}; +use std::{iter, num::NonZeroU32, ptr}; +pub mod device; +pub mod global; mod life; pub mod queue; #[cfg(any(feature = "trace", feature = "replay"))] pub mod trace; +pub use device::Device; pub const SHADER_STAGE_COUNT: usize = 3; // Should be large enough for the largest possible texture row. This @@ -194,9 +189,9 @@ impl UserClosures { } } -fn map_buffer( +fn map_buffer( raw: &A::Device, - buffer: &mut resource::Buffer, + buffer: &Buffer, offset: BufferAddress, size: BufferAddress, kind: HostMap, @@ -206,7 +201,7 @@ fn map_buffer( .map_err(DeviceError::from)? }; - buffer.sync_mapped_writes = match kind { + *buffer.sync_mapped_writes.lock() = match kind { HostMap::Read if !mapping.is_coherent => unsafe { raw.invalidate_mapped_ranges( buffer.raw.as_ref().unwrap(), @@ -235,10 +230,15 @@ fn map_buffer( // reasonable way as all data is pushed to GPU anyways. // No need to flush if it is flushed later anyways. 
- let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none(); + let zero_init_needs_flush_now = + mapping.is_coherent && buffer.sync_mapped_writes.lock().is_none(); let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) }; - for uninitialized in buffer.initialization_status.drain(offset..(size + offset)) { + for uninitialized in buffer + .initialization_status + .write() + .drain(offset..(size + offset)) + { // The mapping's pointer is already offset, however we track the // uninitialized range relative to the buffer's start. let fill_range = @@ -255,11 +255,11 @@ fn map_buffer( Ok(mapping.ptr) } -struct CommandAllocator { +pub(crate) struct CommandAllocator { free_encoders: Vec, } -impl CommandAllocator { +impl CommandAllocator { fn acquire_encoder( &mut self, device: &A::Device, @@ -288,5778 +288,62 @@ impl CommandAllocator { } } -/// Structure describing a logical device. Some members are internally mutable, -/// stored behind mutexes. -/// -/// TODO: establish clear order of locking for these: -/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`, -/// `render_passes`, `pending_writes`, `trace`. -/// -/// Currently, the rules are: -/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system -/// 1. `self.trackers` is locked last (unenforced) -/// 1. `self.trace` is locked last (unenforced) -pub struct Device { - pub(crate) raw: A::Device, - pub(crate) adapter_id: Stored, - pub(crate) queue: A::Queue, - pub(crate) zero_buffer: A::Buffer, - //pub(crate) cmd_allocator: command::CommandAllocator, - //mem_allocator: Mutex>, - //desc_allocator: Mutex>, - //Note: The submission index here corresponds to the last submission that is done. - pub(crate) life_guard: LifeGuard, - - /// A clone of `life_guard.ref_count`. - /// - /// Holding a separate clone of the `RefCount` here lets us tell whether the - /// device is referenced by other resources, even if `life_guard.ref_count` - /// was set to `None` by a call to `device_drop`. - ref_count: RefCount, - - command_allocator: Mutex>, - pub(crate) active_submission_index: SubmissionIndex, - fence: A::Fence, - - /// All live resources allocated with this [`Device`]. - /// - /// Has to be locked temporarily only (locked last) - pub(crate) trackers: Mutex>, - // Life tracker should be locked right after the device and before anything else. - life_tracker: Mutex>, - /// Temporary storage for resource management functions. Cleared at the end - /// of every call (unless an error occurs). - temp_suspected: life::SuspectedResources, - pub(crate) alignments: hal::Alignments, - pub(crate) limits: wgt::Limits, - pub(crate) features: wgt::Features, - pub(crate) downlevel: wgt::DownlevelCapabilities, - // TODO: move this behind another mutex. This would allow several methods to - // switch to borrow Device immutably, such as `write_buffer`, `write_texture`, - // and `buffer_unmap`. 
- pending_writes: queue::PendingWrites, - #[cfg(feature = "trace")] - pub(crate) trace: Option>, -} +#[derive(Clone, Debug, Error)] +#[error("Device is invalid")] +pub struct InvalidDevice; #[derive(Clone, Debug, Error)] -pub enum CreateDeviceError { +pub enum DeviceError { + #[error("Parent device is invalid")] + Invalid, + #[error("Parent device is lost")] + Lost, #[error("Not enough memory left")] OutOfMemory, - #[error("Failed to create internal buffer for initializing textures")] - FailedToCreateZeroBuffer(#[from] DeviceError), } -impl Device { - pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> { - if self.features.contains(feature) { - Ok(()) - } else { - Err(MissingFeatures(feature)) - } - } - - pub(crate) fn require_downlevel_flags( - &self, - flags: wgt::DownlevelFlags, - ) -> Result<(), MissingDownlevelFlags> { - if self.downlevel.flags.contains(flags) { - Ok(()) - } else { - Err(MissingDownlevelFlags(flags)) +impl From for DeviceError { + fn from(error: hal::DeviceError) -> Self { + match error { + hal::DeviceError::Lost => DeviceError::Lost, + hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory, } } } -impl Device { - pub(crate) fn new( - open: hal::OpenDevice, - adapter_id: Stored, - alignments: hal::Alignments, - downlevel: wgt::DownlevelCapabilities, - desc: &DeviceDescriptor, - trace_path: Option<&std::path::Path>, - ) -> Result { - #[cfg(not(feature = "trace"))] - if let Some(_) = trace_path { - log::error!("Feature 'trace' is not enabled"); - } - let fence = - unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; - - let mut com_alloc = CommandAllocator { - free_encoders: Vec::new(), - }; - let pending_encoder = com_alloc - .acquire_encoder(&open.device, &open.queue) - .map_err(|_| CreateDeviceError::OutOfMemory)?; - let mut pending_writes = queue::PendingWrites::::new(pending_encoder); - - // Create zeroed buffer used for texture clears. - let zero_buffer = unsafe { - open.device - .create_buffer(&hal::BufferDescriptor { - label: Some("(wgpu internal) zero init buffer"), - size: ZERO_BUFFER_SIZE, - usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, - memory_flags: hal::MemoryFlags::empty(), - }) - .map_err(DeviceError::from)? 
- }; - pending_writes.activate(); - unsafe { - pending_writes - .command_encoder - .transition_buffers(iter::once(hal::BufferBarrier { - buffer: &zero_buffer, - usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, - })); - pending_writes - .command_encoder - .clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE); - pending_writes - .command_encoder - .transition_buffers(iter::once(hal::BufferBarrier { - buffer: &zero_buffer, - usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC, - })); - } - - let life_guard = LifeGuard::new(""); - let ref_count = life_guard.add_ref(); - Ok(Self { - raw: open.device, - adapter_id, - queue: open.queue, - zero_buffer, - life_guard, - ref_count, - command_allocator: Mutex::new(com_alloc), - active_submission_index: 0, - fence, - trackers: Mutex::new(Tracker::new()), - life_tracker: Mutex::new(life::LifetimeTracker::new()), - temp_suspected: life::SuspectedResources::default(), - #[cfg(feature = "trace")] - trace: trace_path.and_then(|path| match trace::Trace::new(path) { - Ok(mut trace) => { - trace.add(trace::Action::Init { - desc: desc.clone(), - backend: A::VARIANT, - }); - Some(Mutex::new(trace)) - } - Err(e) => { - log::error!("Unable to start a trace in '{:?}': {:?}", path, e); - None - } - }), - alignments, - limits: desc.limits.clone(), - features: desc.features, - downlevel, - pending_writes, - }) - } - - fn lock_life<'this, 'token: 'this>( - &'this self, - //TODO: fix this - the token has to be borrowed for the lock - _token: &mut Token<'token, Self>, - ) -> MutexGuard<'this, life::LifetimeTracker> { - self.life_tracker.lock() - } - - /// Check this device for completed commands. - /// - /// The `maintain` argument tells how the maintence function should behave, either - /// blocking or just polling the current state of the gpu. - /// - /// Return a pair `(closures, queue_empty)`, where: - /// - /// - `closures` is a list of actions to take: mapping buffers, notifying the user - /// - /// - `queue_empty` is a boolean indicating whether there are more queue - /// submissions still in flight. (We have to take the locks needed to - /// produce this information for other reasons, so we might as well just - /// return it to our callers.) - fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( - &'this self, - hub: &Hub, - maintain: wgt::Maintain, - token: &mut Token<'token, Self>, - ) -> Result<(UserClosures, bool), WaitIdleError> { - profiling::scope!("Device::maintain"); - let mut life_tracker = self.lock_life(token); - - // Normally, `temp_suspected` exists only to save heap - // allocations: it's cleared at the start of the function - // call, and cleared by the end. But `Global::queue_submit` is - // fallible; if it exits early, it may leave some resources in - // `temp_suspected`. - life_tracker - .suspected_resources - .extend(&self.temp_suspected); - - life_tracker.triage_suspected( - hub, - &self.trackers, - #[cfg(feature = "trace")] - self.trace.as_ref(), - token, - ); - life_tracker.triage_mapped(hub, token); - - let last_done_index = if maintain.is_wait() { - let index_to_wait_for = match maintain { - wgt::Maintain::WaitForSubmissionIndex(submission_index) => { - // We don't need to check to see if the queue id matches - // as we already checked this from inside the poll call. - submission_index.index - } - _ => self.active_submission_index, - }; - unsafe { - self.raw - .wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS) - .map_err(DeviceError::from)? 
- }; - index_to_wait_for - } else { - unsafe { - self.raw - .get_fence_value(&self.fence) - .map_err(DeviceError::from)? - } - }; - - let submission_closures = - life_tracker.triage_submissions(last_done_index, &self.command_allocator); - let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token); - life_tracker.cleanup(&self.raw); - - let closures = UserClosures { - mappings: mapping_closures, - submissions: submission_closures, - }; - Ok((closures, life_tracker.queue_empty())) - } - - fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( - &'this mut self, - hub: &Hub, - trackers: &Tracker, - token: &mut Token<'token, Self>, - ) { - self.temp_suspected.clear(); - // As the tracker is cleared/dropped, we need to consider all the resources - // that it references for destruction in the next GC pass. - { - let (bind_group_guard, mut token) = hub.bind_groups.read(token); - let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, _) = hub.samplers.read(&mut token); - - for id in trackers.buffers.used() { - if buffer_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.buffers.push(id); - } - } - for id in trackers.textures.used() { - if texture_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.textures.push(id); - } - } - for id in trackers.views.used() { - if texture_view_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.texture_views.push(id); - } - } - for id in trackers.bind_groups.used() { - if bind_group_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.bind_groups.push(id); - } - } - for id in trackers.samplers.used() { - if sampler_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.samplers.push(id); - } - } - for id in trackers.compute_pipelines.used() { - if compute_pipe_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.compute_pipelines.push(id); - } - } - for id in trackers.render_pipelines.used() { - if render_pipe_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.render_pipelines.push(id); - } - } - for id in trackers.query_sets.used() { - if query_set_guard[id].life_guard.ref_count.is_none() { - self.temp_suspected.query_sets.push(id); - } - } - } - - self.lock_life(token) - .suspected_resources - .extend(&self.temp_suspected); - - self.temp_suspected.clear(); - } - - fn create_buffer( - &self, - self_id: id::DeviceId, - desc: &resource::BufferDescriptor, - transient: bool, - ) -> Result, resource::CreateBufferError> { - debug_assert_eq!(self_id.backend(), A::VARIANT); - - if desc.size > self.limits.max_buffer_size { - return Err(resource::CreateBufferError::MaxBufferSize { - requested: desc.size, - maximum: self.limits.max_buffer_size, - }); - } - - if desc.usage.contains(wgt::BufferUsages::INDEX) - && desc.usage.contains( - wgt::BufferUsages::VERTEX - | wgt::BufferUsages::UNIFORM - | wgt::BufferUsages::INDIRECT - | wgt::BufferUsages::STORAGE, - ) - { - self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?; - } - - let mut usage = conv::map_buffer_usage(desc.usage); - - if desc.usage.is_empty() || 
desc.usage.contains_invalid_bits() { - return Err(resource::CreateBufferError::InvalidUsage(desc.usage)); - } - - if !self - .features - .contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS) - { - use wgt::BufferUsages as Bu; - let write_mismatch = desc.usage.contains(Bu::MAP_WRITE) - && !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage); - let read_mismatch = desc.usage.contains(Bu::MAP_READ) - && !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage); - if write_mismatch || read_mismatch { - return Err(resource::CreateBufferError::UsageMismatch(desc.usage)); - } - } - - if desc.mapped_at_creation { - if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 { - return Err(resource::CreateBufferError::UnalignedSize); - } - if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // we are going to be copying into it, internally - usage |= hal::BufferUses::COPY_DST; - } - } else { - // We are required to zero out (initialize) all memory. This is done - // on demand using clear_buffer which requires write transfer usage! - usage |= hal::BufferUses::COPY_DST; - } - - let actual_size = if desc.size == 0 { - wgt::COPY_BUFFER_ALIGNMENT - } else if desc.usage.contains(wgt::BufferUsages::VERTEX) { - // Bumping the size by 1 so that we can bind an empty range at the - // end of the buffer. - desc.size + 1 - } else { - desc.size - }; - let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT; - let aligned_size = if clear_remainder != 0 { - actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder - } else { - actual_size - }; - - let mut memory_flags = hal::MemoryFlags::empty(); - memory_flags.set(hal::MemoryFlags::TRANSIENT, transient); - - let hal_desc = hal::BufferDescriptor { - label: desc.label.borrow_option(), - size: aligned_size, - usage, - memory_flags, - }; - let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?; - - Ok(resource::Buffer { - raw: Some(buffer), - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - usage: desc.usage, - size: desc.size, - initialization_status: BufferInitTracker::new(desc.size), - sync_mapped_writes: None, - map_state: resource::BufferMapState::Idle, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }) - } - - fn create_texture_from_hal( - &self, - hal_texture: A::Texture, - hal_usage: hal::TextureUses, - self_id: id::DeviceId, - desc: &resource::TextureDescriptor, - format_features: wgt::TextureFormatFeatures, - clear_mode: resource::TextureClearMode, - ) -> resource::Texture { - debug_assert_eq!(self_id.backend(), A::VARIANT); - - resource::Texture { - inner: resource::TextureInner::Native { - raw: Some(hal_texture), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - desc: desc.map_label(|_| ()), - hal_usage, - format_features, - initialization_status: TextureInitTracker::new( - desc.mip_level_count, - desc.array_layer_count(), - ), - full_range: TextureSelector { - mips: 0..desc.mip_level_count, - layers: 0..desc.array_layer_count(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - clear_mode, - } - } - - fn create_texture( - &self, - self_id: id::DeviceId, - adapter: &Adapter, - desc: &resource::TextureDescriptor, - ) -> Result, resource::CreateTextureError> { - use resource::{CreateTextureError, TextureDimensionError}; - - if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { - return Err(CreateTextureError::InvalidUsage(desc.usage)); - } - - conv::check_texture_dimension_size( - 
desc.dimension, - desc.size, - desc.sample_count, - &self.limits, - )?; - - if desc.dimension != wgt::TextureDimension::D2 { - // Depth textures can only be 2D - if desc.format.is_depth_stencil_format() { - return Err(CreateTextureError::InvalidDepthDimension( - desc.dimension, - desc.format, - )); - } - // Renderable textures can only be 2D - if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { - return Err(CreateTextureError::InvalidDimensionUsages( - wgt::TextureUsages::RENDER_ATTACHMENT, - desc.dimension, - )); - } - - // Compressed textures can only be 2D - if desc.format.is_compressed() { - return Err(CreateTextureError::InvalidCompressedDimension( - desc.dimension, - desc.format, - )); - } - } - - if desc.format.is_compressed() { - let (block_width, block_height) = desc.format.block_dimensions(); - - if desc.size.width % block_width != 0 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::NotMultipleOfBlockWidth { - width: desc.size.width, - block_width, - format: desc.format, - }, - )); - } - - if desc.size.height % block_height != 0 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::NotMultipleOfBlockHeight { - height: desc.size.height, - block_height, - format: desc.format, - }, - )); - } - } - - let format_features = self - .describe_format_features(adapter, desc.format) - .map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?; - - if desc.sample_count > 1 { - if desc.mip_level_count != 1 { - return Err(CreateTextureError::InvalidMipLevelCount { - requested: desc.mip_level_count, - maximum: 1, - }); - } - - if desc.size.depth_or_array_layers != 1 { - return Err(CreateTextureError::InvalidDimension( - TextureDimensionError::MultisampledDepthOrArrayLayer( - desc.size.depth_or_array_layers, - ), - )); - } - - if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) { - return Err(CreateTextureError::InvalidMultisampledStorageBinding); - } - - if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) { - return Err(CreateTextureError::MultisampledNotRenderAttachment); - } - - if !format_features.flags.intersects( - wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16, - ) { - return Err(CreateTextureError::InvalidMultisampledFormat(desc.format)); - } - - if !format_features - .flags - .sample_count_supported(desc.sample_count) - { - return Err(CreateTextureError::InvalidSampleCount( - desc.sample_count, - desc.format, - )); - }; - } - - let mips = desc.mip_level_count; - let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS); - if mips == 0 || mips > max_levels_allowed { - return Err(CreateTextureError::InvalidMipLevelCount { - requested: mips, - maximum: max_levels_allowed, - }); - } - - let missing_allowed_usages = desc.usage - format_features.allowed_usages; - if !missing_allowed_usages.is_empty() { - // detect downlevel incompatibilities - let wgpu_allowed_usages = desc.format.guaranteed_format_features().allowed_usages; - let wgpu_missing_usages = desc.usage - wgpu_allowed_usages; - return Err(CreateTextureError::InvalidFormatUsages( - missing_allowed_usages, - desc.format, - wgpu_missing_usages.is_empty(), - )); - } - - let mut hal_view_formats = vec![]; - for format in desc.view_formats.iter() { - if desc.format == *format { - continue; - } - if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() { 
- return Err(CreateTextureError::InvalidViewFormat(*format, desc.format)); - } - hal_view_formats.push(*format); - } - if !hal_view_formats.is_empty() { - self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; - } - - // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we - // wouldn't be able to initialize the texture. - let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) - | if desc.format.is_depth_stencil_format() { - hal::TextureUses::DEPTH_STENCIL_WRITE - } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { - hal::TextureUses::COPY_DST // (set already) - } else { - // Use COPY_DST only if we can't use COLOR_TARGET - if format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - && desc.dimension == wgt::TextureDimension::D2 - // Render targets dimension must be 2d - { - hal::TextureUses::COLOR_TARGET - } else { - hal::TextureUses::COPY_DST - } - }; - - let hal_desc = hal::TextureDescriptor { - label: desc.label.borrow_option(), - size: desc.size, - mip_level_count: desc.mip_level_count, - sample_count: desc.sample_count, - dimension: desc.dimension, - format: desc.format, - usage: hal_usage, - memory_flags: hal::MemoryFlags::empty(), - view_formats: hal_view_formats, - }; - - let raw_texture = unsafe { - self.raw - .create_texture(&hal_desc) - .map_err(DeviceError::from)? - }; - - let clear_mode = if hal_usage - .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) - { - let (is_color, usage) = if desc.format.is_depth_stencil_format() { - (false, hal::TextureUses::DEPTH_STENCIL_WRITE) - } else { - (true, hal::TextureUses::COLOR_TARGET) - }; - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; - - let mut clear_views = SmallVec::new(); - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let desc = hal::TextureViewDescriptor { - label: Some("(wgpu internal) clear texture view"), - format: desc.format, - dimension, - usage, - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - array_layer_count: Some(1), - }, - }; - clear_views.push( - unsafe { self.raw.create_texture_view(&raw_texture, &desc) } - .map_err(DeviceError::from)?, - ); - } - } - resource::TextureClearMode::RenderPass { - clear_views, - is_color, - } - } else { - resource::TextureClearMode::BufferCopy - }; - - let mut texture = self.create_texture_from_hal( - raw_texture, - hal_usage, - self_id, - desc, - format_features, - clear_mode, - ); - texture.hal_usage = hal_usage; - Ok(texture) - } - - fn create_texture_view( - &self, - texture: &resource::Texture, - texture_id: id::TextureId, - desc: &resource::TextureViewDescriptor, - ) -> Result, resource::CreateTextureViewError> { - let texture_raw = texture - .inner - .as_raw() - .ok_or(resource::CreateTextureViewError::InvalidTexture)?; - - // resolve TextureViewDescriptor defaults - // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults - - let resolved_format = desc.format.unwrap_or_else(|| { - texture - .desc - .format - .aspect_specific_format(desc.range.aspect) - .unwrap_or(texture.desc.format) - }); - - let resolved_dimension = desc - .dimension - .unwrap_or_else(|| match texture.desc.dimension { - 
wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => { - if texture.desc.array_layer_count() == 1 { - wgt::TextureViewDimension::D2 - } else { - wgt::TextureViewDimension::D2Array - } - } - wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3, - }); - - let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| { - texture - .desc - .mip_level_count - .saturating_sub(desc.range.base_mip_level) - }); - - let resolved_array_layer_count = - desc.range - .array_layer_count - .unwrap_or_else(|| match resolved_dimension { - wgt::TextureViewDimension::D1 - | wgt::TextureViewDimension::D2 - | wgt::TextureViewDimension::D3 => 1, - wgt::TextureViewDimension::Cube => 6, - wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => { - texture - .desc - .array_layer_count() - .saturating_sub(desc.range.base_array_layer) - } - }); - - // validate TextureViewDescriptor - - let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect); - if aspects.is_empty() { - return Err(resource::CreateTextureViewError::InvalidAspect { - texture_format: texture.desc.format, - requested_aspect: desc.range.aspect, - }); - } - - let format_is_good = if desc.range.aspect == wgt::TextureAspect::All { - resolved_format == texture.desc.format - || texture.desc.view_formats.contains(&resolved_format) - } else { - Some(resolved_format) - == texture - .desc - .format - .aspect_specific_format(desc.range.aspect) - }; - if !format_is_good { - return Err(resource::CreateTextureViewError::FormatReinterpretation { - texture: texture.desc.format, - view: resolved_format, - }); - } - - // check if multisampled texture is seen as anything but 2D - if texture.desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { - return Err( - resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension( - resolved_dimension, - ), - ); - } - - // check if the dimension is compatible with the texture - if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() { - return Err( - resource::CreateTextureViewError::InvalidTextureViewDimension { - view: resolved_dimension, - texture: texture.desc.dimension, - }, - ); - } - - match resolved_dimension { - TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => { - if resolved_array_layer_count != 1 { - return Err(resource::CreateTextureViewError::InvalidArrayLayerCount { - requested: resolved_array_layer_count, - dim: resolved_dimension, - }); - } - } - TextureViewDimension::Cube => { - if resolved_array_layer_count != 6 { - return Err( - resource::CreateTextureViewError::InvalidCubemapTextureDepth { - depth: resolved_array_layer_count, - }, - ); - } - } - TextureViewDimension::CubeArray => { - if resolved_array_layer_count % 6 != 0 { - return Err( - resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth { - depth: resolved_array_layer_count, - }, - ); - } - } - _ => {} - } - - match resolved_dimension { - TextureViewDimension::Cube | TextureViewDimension::CubeArray => { - if texture.desc.size.width != texture.desc.size.height { - return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize); - } - } - _ => {} - } - - if resolved_mip_level_count == 0 { - return Err(resource::CreateTextureViewError::ZeroMipLevelCount); - } - - let mip_level_end = desc - .range - .base_mip_level - .saturating_add(resolved_mip_level_count); - - let level_end = texture.desc.mip_level_count; - if mip_level_end > level_end { - return 
Err(resource::CreateTextureViewError::TooManyMipLevels { - requested: mip_level_end, - total: level_end, - }); - } - - if resolved_array_layer_count == 0 { - return Err(resource::CreateTextureViewError::ZeroArrayLayerCount); - } - - let array_layer_end = desc - .range - .base_array_layer - .saturating_add(resolved_array_layer_count); - - let layer_end = texture.desc.array_layer_count(); - if array_layer_end > layer_end { - return Err(resource::CreateTextureViewError::TooManyArrayLayers { - requested: array_layer_end, - total: layer_end, - }); - }; - - // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view - let render_extent = 'b: loop { - if !texture - .desc - .usage - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - { - break 'b Err(TextureViewNotRenderableReason::Usage(texture.desc.usage)); - } - - if resolved_dimension != TextureViewDimension::D2 { - break 'b Err(TextureViewNotRenderableReason::Dimension( - resolved_dimension, - )); - } - - if resolved_mip_level_count != 1 { - break 'b Err(TextureViewNotRenderableReason::MipLevelCount( - resolved_mip_level_count, - )); - } - - if resolved_array_layer_count != 1 { - break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount( - resolved_array_layer_count, - )); - } - - if aspects != hal::FormatAspects::from(texture.desc.format) { - break 'b Err(TextureViewNotRenderableReason::Aspects(aspects)); - } - - break 'b Ok(texture - .desc - .compute_render_extent(desc.range.base_mip_level)); - }; - - // filter the usages based on the other criteria - let usage = { - let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST); - let mask_dimension = match resolved_dimension { - wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { - hal::TextureUses::RESOURCE - } - wgt::TextureViewDimension::D3 => { - hal::TextureUses::RESOURCE - | hal::TextureUses::STORAGE_READ - | hal::TextureUses::STORAGE_READ_WRITE - } - _ => hal::TextureUses::all(), - }; - let mask_mip_level = if resolved_mip_level_count == 1 { - hal::TextureUses::all() - } else { - hal::TextureUses::RESOURCE - }; - texture.hal_usage & mask_copy & mask_dimension & mask_mip_level - }; - - log::debug!( - "Create view for texture {:?} filters usages to {:?}", - texture_id, - usage - ); - - // use the combined depth-stencil format for the view - let format = if resolved_format.is_depth_stencil_component(texture.desc.format) { - texture.desc.format - } else { - resolved_format - }; - - let resolved_range = wgt::ImageSubresourceRange { - aspect: desc.range.aspect, - base_mip_level: desc.range.base_mip_level, - mip_level_count: Some(resolved_mip_level_count), - base_array_layer: desc.range.base_array_layer, - array_layer_count: Some(resolved_array_layer_count), - }; - - let hal_desc = hal::TextureViewDescriptor { - label: desc.label.borrow_option(), - format, - dimension: resolved_dimension, - usage, - range: resolved_range, - }; - - let raw = unsafe { - self.raw - .create_texture_view(texture_raw, &hal_desc) - .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
- }; - - let selector = TextureSelector { - mips: desc.range.base_mip_level..mip_level_end, - layers: desc.range.base_array_layer..array_layer_end, - }; - - Ok(resource::TextureView { - raw, - parent_id: Stored { - value: id::Valid(texture_id), - ref_count: texture.life_guard.add_ref(), - }, - device_id: texture.device_id.clone(), - desc: resource::HalTextureViewDescriptor { - format: resolved_format, - dimension: resolved_dimension, - range: resolved_range, - }, - format_features: texture.format_features, - render_extent, - samples: texture.desc.sample_count, - selector, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }) - } - - fn create_sampler( - &self, - self_id: id::DeviceId, - desc: &resource::SamplerDescriptor, - ) -> Result, resource::CreateSamplerError> { - if desc - .address_modes - .iter() - .any(|am| am == &wgt::AddressMode::ClampToBorder) - { - self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?; - } +#[derive(Clone, Debug, Error)] +#[error("Features {0:?} are required but not enabled on the device")] +pub struct MissingFeatures(pub wgt::Features); - if desc.border_color == Some(wgt::SamplerBorderColor::Zero) { - self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?; - } +#[derive(Clone, Debug, Error)] +#[error( + "Downlevel flags {0:?} are required but not supported on the device.\n{}", + DOWNLEVEL_ERROR_MESSAGE +)] +pub struct MissingDownlevelFlags(pub wgt::DownlevelFlags); - if desc.lod_min_clamp < 0.0 { - return Err(resource::CreateSamplerError::InvalidLodMinClamp( - desc.lod_min_clamp, - )); - } - if desc.lod_max_clamp < desc.lod_min_clamp { - return Err(resource::CreateSamplerError::InvalidLodMaxClamp { - lod_min_clamp: desc.lod_min_clamp, - lod_max_clamp: desc.lod_max_clamp, - }); - } +#[derive(Clone, Debug)] +#[cfg_attr(feature = "trace", derive(serde::Serialize))] +#[cfg_attr(feature = "replay", derive(serde::Deserialize))] +pub struct ImplicitPipelineContext { + pub root_id: id::PipelineLayoutId, + pub group_ids: ArrayVec, +} - if desc.anisotropy_clamp < 1 { - return Err(resource::CreateSamplerError::InvalidAnisotropy( - desc.anisotropy_clamp, - )); - } +pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> { + pub root_id: Input, + pub group_ids: &'a [Input], +} - if desc.anisotropy_clamp != 1 { - if !matches!(desc.min_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MinFilter, - filter_mode: desc.min_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } - if !matches!(desc.mag_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MagFilter, - filter_mode: desc.mag_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } - if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) { - return Err( - resource::CreateSamplerError::InvalidFilterModeWithAnisotropy { - filter_type: resource::SamplerFilterErrorType::MipmapFilter, - filter_mode: desc.mipmap_filter, - anisotropic_clamp: desc.anisotropy_clamp, - }, - ); - } +impl ImplicitPipelineIds<'_, G> { + fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { + ImplicitPipelineContext { + root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(), + group_ids: self + .group_ids + .iter() + .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id()) + .collect(), } - - let anisotropy_clamp = if self - 
.downlevel - .flags - .contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING) - { - // Clamp anisotropy clamp to [1, 16] per the wgpu-hal interface - desc.anisotropy_clamp.min(16) - } else { - // If it isn't supported, set this unconditionally to 1 - 1 - }; - - //TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS - - let hal_desc = hal::SamplerDescriptor { - label: desc.label.borrow_option(), - address_modes: desc.address_modes, - mag_filter: desc.mag_filter, - min_filter: desc.min_filter, - mipmap_filter: desc.mipmap_filter, - lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp, - compare: desc.compare, - anisotropy_clamp, - border_color: desc.border_color, - }; - - let raw = unsafe { - self.raw - .create_sampler(&hal_desc) - .map_err(DeviceError::from)? - }; - Ok(resource::Sampler { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - comparison: desc.compare.is_some(), - filtering: desc.min_filter == wgt::FilterMode::Linear - || desc.mag_filter == wgt::FilterMode::Linear, - }) - } - - fn create_shader_module<'a>( - &self, - self_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor<'a>, - source: pipeline::ShaderModuleSource<'a>, - ) -> Result, pipeline::CreateShaderModuleError> { - let (module, source) = match source { - #[cfg(feature = "wgsl")] - pipeline::ShaderModuleSource::Wgsl(code) => { - profiling::scope!("naga::wgsl::parse_str"); - let module = naga::front::wgsl::parse_str(&code).map_err(|inner| { - pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError { - source: code.to_string(), - label: desc.label.as_ref().map(|l| l.to_string()), - inner: Box::new(inner), - }) - })?; - (Cow::Owned(module), code.into_owned()) - } - pipeline::ShaderModuleSource::Naga(module) => (module, String::new()), - pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"), - }; - for (_, var) in module.global_variables.iter() { - match var.binding { - Some(ref br) if br.group >= self.limits.max_bind_groups => { - return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex { - bind: br.clone(), - group: br.group, - limit: self.limits.max_bind_groups, - }); - } - _ => continue, - }; - } - - use naga::valid::Capabilities as Caps; - profiling::scope!("naga::validate"); - - let mut caps = Caps::empty(); - caps.set( - Caps::PUSH_CONSTANT, - self.features.contains(wgt::Features::PUSH_CONSTANTS), - ); - caps.set( - Caps::FLOAT64, - self.features.contains(wgt::Features::SHADER_F64), - ); - caps.set( - Caps::PRIMITIVE_INDEX, - self.features - .contains(wgt::Features::SHADER_PRIMITIVE_INDEX), - ); - caps.set( - Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - caps.set( - Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - // TODO: This needs a proper wgpu feature - caps.set( - Caps::SAMPLER_NON_UNIFORM_INDEXING, - self.features.contains( - wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING, - ), - ); - caps.set( - Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS, - self.features - .contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM), - ); - caps.set( - Caps::MULTIVIEW, - self.features.contains(wgt::Features::MULTIVIEW), - ); - caps.set( - Caps::EARLY_DEPTH_TEST, - 
self.features - .contains(wgt::Features::SHADER_EARLY_DEPTH_TEST), - ); - caps.set( - Caps::MULTISAMPLED_SHADING, - self.downlevel - .flags - .contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING), - ); - - let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps) - .validate(&module) - .map_err(|inner| { - pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError { - source, - label: desc.label.as_ref().map(|l| l.to_string()), - inner: Box::new(inner), - }) - })?; - let interface = - validation::Interface::new(&module, &info, self.features, self.limits.clone()); - let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info }); - - let hal_desc = hal::ShaderModuleDescriptor { - label: desc.label.borrow_option(), - runtime_checks: desc.shader_bound_checks.runtime_checks(), - }; - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { - Ok(raw) => raw, - Err(error) => { - return Err(match error { - hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) - } - hal::ShaderError::Compilation(ref msg) => { - log::error!("Shader error: {}", msg); - pipeline::CreateShaderModuleError::Generation - } - }) - } - }; - - Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - interface: Some(interface), - #[cfg(debug_assertions)] - label: desc.label.borrow_or_default().to_string(), - }) - } - - #[allow(unused_unsafe)] - unsafe fn create_shader_module_spirv<'a>( - &self, - self_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor<'a>, - source: &'a [u32], - ) -> Result, pipeline::CreateShaderModuleError> { - self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?; - let hal_desc = hal::ShaderModuleDescriptor { - label: desc.label.borrow_option(), - runtime_checks: desc.shader_bound_checks.runtime_checks(), - }; - let hal_shader = hal::ShaderInput::SpirV(source); - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { - Ok(raw) => raw, - Err(error) => { - return Err(match error { - hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) - } - hal::ShaderError::Compilation(ref msg) => { - log::error!("Shader error: {}", msg); - pipeline::CreateShaderModuleError::Generation - } - }) - } - }; - - Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - interface: None, - #[cfg(debug_assertions)] - label: desc.label.borrow_or_default().to_string(), - }) - } - - fn deduplicate_bind_group_layout( - self_id: id::DeviceId, - entry_map: &binding_model::BindEntryMap, - guard: &Storage, id::BindGroupLayoutId>, - ) -> Option { - guard - .iter(self_id.backend()) - .find(|&(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map) - .map(|(id, value)| { - value.multi_ref_count.inc(); - id - }) - } - - fn get_introspection_bind_group_layouts<'a>( - pipeline_layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, - ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { - pipeline_layout - .bind_group_layout_ids - .iter() - .map(|&id| &bgl_guard[id].entries) - .collect() - } - - /// Generate information about late-validated buffer bindings for pipelines. - //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? 
- fn make_late_sized_buffer_groups<'a>( - shader_binding_sizes: &FastHashMap, - layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, - ) -> ArrayVec { - // Given the shader-required binding sizes and the pipeline layout, - // return the filtered list of them in the layout order, - // removing those with given `min_binding_size`. - layout - .bind_group_layout_ids - .iter() - .enumerate() - .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { - shader_sizes: bgl_guard[bgl_id] - .entries - .values() - .filter_map(|entry| match entry.ty { - wgt::BindingType::Buffer { - min_binding_size: None, - .. - } => { - let rb = naga::ResourceBinding { - group: group_index as u32, - binding: entry.binding, - }; - let shader_size = - shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get()); - Some(shader_size) - } - _ => None, - }) - .collect(), - }) - .collect() - } - - fn create_bind_group_layout( - &self, - self_id: id::DeviceId, - label: Option<&str>, - entry_map: binding_model::BindEntryMap, - ) -> Result, binding_model::CreateBindGroupLayoutError> { - #[derive(PartialEq)] - enum WritableStorage { - Yes, - No, - } - - for entry in entry_map.values() { - use wgt::BindingType as Bt; - - let mut required_features = wgt::Features::empty(); - let mut required_downlevel_flags = wgt::DownlevelFlags::empty(); - let (array_feature, writable_storage) = match entry.ty { - Bt::Buffer { - ty: wgt::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: _, - } => ( - Some(wgt::Features::BUFFER_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Buffer { - ty: wgt::BufferBindingType::Uniform, - has_dynamic_offset: true, - min_binding_size: _, - } => ( - Some(wgt::Features::BUFFER_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Buffer { - ty: wgt::BufferBindingType::Storage { read_only }, - .. - } => ( - Some( - wgt::Features::BUFFER_BINDING_ARRAY - | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, - ), - match read_only { - true => WritableStorage::No, - false => WritableStorage::Yes, - }, - ), - Bt::Sampler { .. } => ( - Some(wgt::Features::TEXTURE_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::Texture { .. 
} => ( - Some(wgt::Features::TEXTURE_BINDING_ARRAY), - WritableStorage::No, - ), - Bt::StorageTexture { - access, - view_dimension, - format: _, - } => { - match view_dimension { - wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { - return Err(binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, - }) - } - _ => (), - } - match access { - wgt::StorageTextureAccess::ReadOnly - | wgt::StorageTextureAccess::ReadWrite - if !self.features.contains( - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES, - ) => - { - return Err(binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, - }); - } - _ => (), - } - ( - Some( - wgt::Features::TEXTURE_BINDING_ARRAY - | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY, - ), - match access { - wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes, - wgt::StorageTextureAccess::ReadOnly => { - required_features |= - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; - WritableStorage::No - } - wgt::StorageTextureAccess::ReadWrite => { - required_features |= - wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES; - WritableStorage::Yes - } - }, - ) - } - }; - - // Validate the count parameter - if entry.count.is_some() { - required_features |= array_feature - .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - } - - if entry.visibility.contains_invalid_bits() { - return Err( - binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility), - ); - } - - if entry.visibility.contains(wgt::ShaderStages::VERTEX) { - if writable_storage == WritableStorage::Yes { - required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE; - } - if let Bt::Buffer { - ty: wgt::BufferBindingType::Storage { .. }, - .. - } = entry.ty - { - required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE; - } - } - if writable_storage == WritableStorage::Yes - && entry.visibility.contains(wgt::ShaderStages::FRAGMENT) - { - required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE; - } - - self.require_features(required_features) - .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - self.require_downlevel_flags(required_downlevel_flags) - .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) - .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { - binding: entry.binding, - error, - })?; - } - - let bgl_flags = conv::bind_group_layout_flags(self.features); - - let mut hal_bindings = entry_map.values().cloned().collect::>(); - hal_bindings.sort_by_key(|b| b.binding); - let hal_desc = hal::BindGroupLayoutDescriptor { - label, - flags: bgl_flags, - entries: &hal_bindings, - }; - let raw = unsafe { - self.raw - .create_bind_group_layout(&hal_desc) - .map_err(DeviceError::from)? - }; - - let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); - for entry in entry_map.values() { - count_validator.add_binding(entry); - } - // If a single bind group layout violates limits, the pipeline layout is - // definitely going to violate limits too, lets catch it now. 
- count_validator - .validate(&self.limits) - .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; - - Ok(binding_model::BindGroupLayout { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - multi_ref_count: MultiRefCount::new(), - dynamic_count: entry_map - .values() - .filter(|b| b.ty.has_dynamic_offset()) - .count(), - count_validator, - entries: entry_map, - #[cfg(debug_assertions)] - label: label.unwrap_or("").to_string(), - }) - } - - fn create_buffer_binding<'a>( - bb: &binding_model::BufferBinding, - binding: u32, - decl: &wgt::BindGroupLayoutEntry, - used_buffer_ranges: &mut Vec, - dynamic_binding_info: &mut Vec, - late_buffer_binding_sizes: &mut FastHashMap, - used: &mut BindGroupStates, - storage: &'a Storage, id::BufferId>, - limits: &wgt::Limits, - ) -> Result, binding_model::CreateBindGroupError> { - use crate::binding_model::CreateBindGroupError as Error; - - let (binding_ty, dynamic, min_size) = match decl.ty { - wgt::BindingType::Buffer { - ty, - has_dynamic_offset, - min_binding_size, - } => (ty, has_dynamic_offset, min_binding_size), - _ => { - return Err(Error::WrongBindingType { - binding, - actual: decl.ty, - expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer", - }) - } - }; - let (pub_usage, internal_use, range_limit) = match binding_ty { - wgt::BufferBindingType::Uniform => ( - wgt::BufferUsages::UNIFORM, - hal::BufferUses::UNIFORM, - limits.max_uniform_buffer_binding_size, - ), - wgt::BufferBindingType::Storage { read_only } => ( - wgt::BufferUsages::STORAGE, - if read_only { - hal::BufferUses::STORAGE_READ - } else { - hal::BufferUses::STORAGE_READ_WRITE - }, - limits.max_storage_buffer_binding_size, - ), - }; - - let (align, align_limit_name) = - binding_model::buffer_binding_type_alignment(limits, binding_ty); - if bb.offset % align as u64 != 0 { - return Err(Error::UnalignedBufferOffset( - bb.offset, - align_limit_name, - align, - )); - } - - let buffer = used - .buffers - .add_single(storage, bb.buffer_id, internal_use) - .ok_or(Error::InvalidBuffer(bb.buffer_id))?; - check_buffer_usage(buffer.usage, pub_usage)?; - let raw_buffer = buffer - .raw - .as_ref() - .ok_or(Error::InvalidBuffer(bb.buffer_id))?; - - let (bind_size, bind_end) = match bb.size { - Some(size) => { - let end = bb.offset + size.get(); - if end > buffer.size { - return Err(Error::BindingRangeTooLarge { - buffer: bb.buffer_id, - range: bb.offset..end, - size: buffer.size, - }); - } - (size.get(), end) - } - None => (buffer.size - bb.offset, buffer.size), - }; - - if bind_size > range_limit as u64 { - return Err(Error::BufferRangeTooLarge { - binding, - given: bind_size as u32, - limit: range_limit, - }); - } - - // Record binding info for validating dynamic offsets - if dynamic { - dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData { - binding_idx: binding, - buffer_size: buffer.size, - binding_range: bb.offset..bind_end, - maximum_dynamic_offset: buffer.size - bind_end, - binding_type: binding_ty, - }); - } - - if let Some(non_zero) = min_size { - let min_size = non_zero.get(); - if min_size > bind_size { - return Err(Error::BindingSizeTooSmall { - buffer: bb.buffer_id, - actual: bind_size, - min: min_size, - }); - } - } else { - let late_size = - wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?; - late_buffer_binding_sizes.insert(binding, late_size); - } - - assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); - 
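The buffer-binding checks above boil down to a small amount of range arithmetic: resolve the optional binding size against the buffer size, then validate alignment and the per-type binding limit. A standalone sketch of that arithmetic with plain integers and hypothetical names (`resolve_binding`, `BindError`), not wgpu-core's actual API:

    #[derive(Debug, PartialEq)]
    enum BindError {
        UnalignedOffset { offset: u64, align: u64 },
        RangeTooLarge { end: u64, buffer_size: u64 },
        BindingTooLarge { size: u64, limit: u64 },
        ZeroSize,
    }

    /// Returns `(bind_size, bind_end)` for a binding starting at `offset` with an
    /// optional explicit `size` (`None` means "to the end of the buffer").
    fn resolve_binding(
        offset: u64,
        size: Option<u64>,
        buffer_size: u64,
        align: u64,
        range_limit: u64,
    ) -> Result<(u64, u64), BindError> {
        if offset % align != 0 {
            return Err(BindError::UnalignedOffset { offset, align });
        }
        if offset > buffer_size {
            return Err(BindError::RangeTooLarge { end: offset, buffer_size });
        }
        let (bind_size, bind_end) = match size {
            Some(size) => {
                let end = offset + size;
                if end > buffer_size {
                    return Err(BindError::RangeTooLarge { end, buffer_size });
                }
                (size, end)
            }
            None => (buffer_size - offset, buffer_size),
        };
        if bind_size > range_limit {
            return Err(BindError::BindingTooLarge { size: bind_size, limit: range_limit });
        }
        if bind_size == 0 {
            return Err(BindError::ZeroSize);
        }
        Ok((bind_size, bind_end))
    }

    fn main() {
        // 256-byte uniform binding at offset 256 of a 1024-byte buffer.
        assert_eq!(resolve_binding(256, Some(256), 1024, 256, 65536), Ok((256, 512)));
        // `size: None` binds from the offset to the end of the buffer.
        assert_eq!(resolve_binding(256, None, 1024, 256, 65536), Ok((768, 1024)));
        assert_eq!(
            resolve_binding(100, None, 1024, 256, 65536),
            Err(BindError::UnalignedOffset { offset: 100, align: 256 })
        );
    }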
used_buffer_ranges.extend(buffer.initialization_status.create_action( - bb.buffer_id, - bb.offset..bb.offset + bind_size, - MemoryInitKind::NeedsInitializedMemory, - )); - - Ok(hal::BufferBinding { - buffer: raw_buffer, - offset: bb.offset, - size: bb.size, - }) - } - - fn create_texture_binding( - view: &resource::TextureView, - texture_guard: &Storage, id::TextureId>, - internal_use: hal::TextureUses, - pub_usage: wgt::TextureUsages, - used: &mut BindGroupStates, - used_texture_ranges: &mut Vec, - ) -> Result<(), binding_model::CreateBindGroupError> { - // Careful here: the texture may no longer have its own ref count, - // if it was deleted by the user. - let texture = used - .textures - .add_single( - texture_guard, - view.parent_id.value.0, - view.parent_id.ref_count.clone(), - Some(view.selector.clone()), - internal_use, - ) - .ok_or(binding_model::CreateBindGroupError::InvalidTexture( - view.parent_id.value.0, - ))?; - check_texture_usage(texture.desc.usage, pub_usage)?; - - used_texture_ranges.push(TextureInitTrackerAction { - id: view.parent_id.value.0, - range: TextureInitRange { - mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), - layer_range: view - .desc - .range - .layer_range(texture.desc.array_layer_count()), - }, - kind: MemoryInitKind::NeedsInitializedMemory, - }); - - Ok(()) - } - - fn create_bind_group( - &self, - self_id: id::DeviceId, - layout: &binding_model::BindGroupLayout, - desc: &binding_model::BindGroupDescriptor, - hub: &Hub, - token: &mut Token>, - ) -> Result, binding_model::CreateBindGroupError> { - use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; - { - // Check that the number of entries in the descriptor matches - // the number of entries in the layout. - let actual = desc.entries.len(); - let expected = layout.entries.len(); - if actual != expected { - return Err(Error::BindingsNumMismatch { expected, actual }); - } - } - - // TODO: arrayvec/smallvec, or re-use allocations - // Record binding info for dynamic offset validation - let mut dynamic_binding_info = Vec::new(); - // Map of binding -> shader reflected size - //Note: we can't collect into a vector right away because - // it needs to be in BGL iteration order, not BG entry order. 
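To make the note above concrete: the sizes are discovered while walking the bind group entries, but must be emitted in the layout's iteration order, so they go into a map first and are read back in layout order afterwards. A small sketch with simplified stand-in types (a `BTreeMap` plays the role of the layout's ordered entries here):

    use std::collections::{BTreeMap, HashMap};

    /// Emit shader-reflected sizes in layout (binding) order, regardless of the
    /// order in which they were discovered.
    fn late_sizes_in_layout_order(
        layout_entries: &BTreeMap<u32, &'static str>, // binding -> layout entry (ordered)
        discovered: &HashMap<u32, u64>,               // binding -> reflected size (unordered)
    ) -> Vec<u64> {
        layout_entries
            .keys()
            .filter_map(|binding| discovered.get(binding).copied())
            .collect()
    }

    fn main() {
        let mut layout_entries = BTreeMap::new();
        layout_entries.insert(0, "uniform");
        layout_entries.insert(2, "storage");

        let mut discovered = HashMap::new();
        discovered.insert(2, 256); // found first while walking the BG entries
        discovered.insert(0, 64);

        // Emitted in binding order (0, then 2), not discovery order.
        assert_eq!(
            late_sizes_in_layout_order(&layout_entries, &discovered),
            vec![64, 256]
        );
    }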
- let mut late_buffer_binding_sizes = FastHashMap::default(); - // fill out the descriptors - let mut used = BindGroupStates::new(); - - let (buffer_guard, mut token) = hub.buffers.read(token); - let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, _) = hub.samplers.read(&mut token); - - let mut used_buffer_ranges = Vec::new(); - let mut used_texture_ranges = Vec::new(); - let mut hal_entries = Vec::with_capacity(desc.entries.len()); - let mut hal_buffers = Vec::new(); - let mut hal_samplers = Vec::new(); - let mut hal_textures = Vec::new(); - for entry in desc.entries.iter() { - let binding = entry.binding; - // Find the corresponding declaration in the layout - let decl = layout - .entries - .get(&binding) - .ok_or(Error::MissingBindingDeclaration(binding))?; - let (res_index, count) = match entry.resource { - Br::Buffer(ref bb) => { - let bb = Self::create_buffer_binding( - bb, - binding, - decl, - &mut used_buffer_ranges, - &mut dynamic_binding_info, - &mut late_buffer_binding_sizes, - &mut used, - &*buffer_guard, - &self.limits, - )?; - - let res_index = hal_buffers.len(); - hal_buffers.push(bb); - (res_index, 1) - } - Br::BufferArray(ref bindings_array) => { - let num_bindings = bindings_array.len(); - Self::check_array_binding(self.features, decl.count, num_bindings)?; - - let res_index = hal_buffers.len(); - for bb in bindings_array.iter() { - let bb = Self::create_buffer_binding( - bb, - binding, - decl, - &mut used_buffer_ranges, - &mut dynamic_binding_info, - &mut late_buffer_binding_sizes, - &mut used, - &*buffer_guard, - &self.limits, - )?; - hal_buffers.push(bb); - } - (res_index, num_bindings) - } - Br::Sampler(id) => { - match decl.ty { - wgt::BindingType::Sampler(ty) => { - let sampler = used - .samplers - .add_single(&*sampler_guard, id) - .ok_or(Error::InvalidSampler(id))?; - - // Allowed sampler values for filtering and comparison - let (allowed_filtering, allowed_comparison) = match ty { - wgt::SamplerBindingType::Filtering => (None, false), - wgt::SamplerBindingType::NonFiltering => (Some(false), false), - wgt::SamplerBindingType::Comparison => (None, true), - }; - - if let Some(allowed_filtering) = allowed_filtering { - if allowed_filtering != sampler.filtering { - return Err(Error::WrongSamplerFiltering { - binding, - layout_flt: allowed_filtering, - sampler_flt: sampler.filtering, - }); - } - } - - if allowed_comparison != sampler.comparison { - return Err(Error::WrongSamplerComparison { - binding, - layout_cmp: allowed_comparison, - sampler_cmp: sampler.comparison, - }); - } - - let res_index = hal_samplers.len(); - hal_samplers.push(&sampler.raw); - (res_index, 1) - } - _ => { - return Err(Error::WrongBindingType { - binding, - actual: decl.ty, - expected: "Sampler", - }) - } - } - } - Br::SamplerArray(ref bindings_array) => { - let num_bindings = bindings_array.len(); - Self::check_array_binding(self.features, decl.count, num_bindings)?; - - let res_index = hal_samplers.len(); - for &id in bindings_array.iter() { - let sampler = used - .samplers - .add_single(&*sampler_guard, id) - .ok_or(Error::InvalidSampler(id))?; - hal_samplers.push(&sampler.raw); - } - - (res_index, num_bindings) - } - Br::TextureView(id) => { - let view = used - .views - .add_single(&*texture_view_guard, id) - .ok_or(Error::InvalidTextureView(id))?; - let (pub_usage, internal_use) = Self::texture_use_parameters( - binding, - decl, - view, - "SampledTexture, 
ReadonlyStorageTexture or WriteonlyStorageTexture", - )?; - Self::create_texture_binding( - view, - &texture_guard, - internal_use, - pub_usage, - &mut used, - &mut used_texture_ranges, - )?; - let res_index = hal_textures.len(); - hal_textures.push(hal::TextureBinding { - view: &view.raw, - usage: internal_use, - }); - (res_index, 1) - } - Br::TextureViewArray(ref bindings_array) => { - let num_bindings = bindings_array.len(); - Self::check_array_binding(self.features, decl.count, num_bindings)?; - - let res_index = hal_textures.len(); - for &id in bindings_array.iter() { - let view = used - .views - .add_single(&*texture_view_guard, id) - .ok_or(Error::InvalidTextureView(id))?; - let (pub_usage, internal_use) = - Self::texture_use_parameters(binding, decl, view, - "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; - Self::create_texture_binding( - view, - &texture_guard, - internal_use, - pub_usage, - &mut used, - &mut used_texture_ranges, - )?; - hal_textures.push(hal::TextureBinding { - view: &view.raw, - usage: internal_use, - }); - } - - (res_index, num_bindings) - } - }; - - hal_entries.push(hal::BindGroupEntry { - binding, - resource_index: res_index as u32, - count: count as u32, - }); - } - - used.optimize(); - - hal_entries.sort_by_key(|entry| entry.binding); - for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) { - if a.binding == b.binding { - return Err(Error::DuplicateBinding(a.binding)); - } - } - - let hal_desc = hal::BindGroupDescriptor { - label: desc.label.borrow_option(), - layout: &layout.raw, - entries: &hal_entries, - buffers: &hal_buffers, - samplers: &hal_samplers, - textures: &hal_textures, - }; - let raw = unsafe { - self.raw - .create_bind_group(&hal_desc) - .map_err(DeviceError::from)? 
- }; - - // manually add a dependency on BGL - layout.multi_ref_count.inc(); - - Ok(binding_model::BindGroup { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - layout_id: id::Valid(desc.layout), - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - used, - used_buffer_ranges, - used_texture_ranges, - dynamic_binding_info, - // collect in the order of BGL iteration - late_buffer_binding_sizes: layout - .entries - .keys() - .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned()) - .collect(), - }) - } - - fn check_array_binding( - features: wgt::Features, - count: Option, - num_bindings: usize, - ) -> Result<(), super::binding_model::CreateBindGroupError> { - use super::binding_model::CreateBindGroupError as Error; - - if let Some(count) = count { - let count = count.get() as usize; - if count < num_bindings { - return Err(Error::BindingArrayPartialLengthMismatch { - actual: num_bindings, - expected: count, - }); - } - if count != num_bindings - && !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY) - { - return Err(Error::BindingArrayLengthMismatch { - actual: num_bindings, - expected: count, - }); - } - if num_bindings == 0 { - return Err(Error::BindingArrayZeroLength); - } - } else { - return Err(Error::SingleBindingExpected); - }; - - Ok(()) - } - - fn texture_use_parameters( - binding: u32, - decl: &wgt::BindGroupLayoutEntry, - view: &crate::resource::TextureView, - expected: &'static str, - ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> { - use crate::binding_model::CreateBindGroupError as Error; - if view - .desc - .aspects() - .contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL) - { - return Err(Error::DepthStencilAspect); - } - match decl.ty { - wgt::BindingType::Texture { - sample_type, - view_dimension, - multisampled, - } => { - use wgt::TextureSampleType as Tst; - if multisampled != (view.samples != 1) { - return Err(Error::InvalidTextureMultisample { - binding, - layout_multisampled: multisampled, - view_samples: view.samples, - }); - } - let compat_sample_type = view - .desc - .format - .sample_type(Some(view.desc.range.aspect)) - .unwrap(); - match (sample_type, compat_sample_type) { - (Tst::Uint, Tst::Uint) | - (Tst::Sint, Tst::Sint) | - (Tst::Depth, Tst::Depth) | - // if we expect non-filterable, accept anything float - (Tst::Float { filterable: false }, Tst::Float { .. }) | - // if we expect filterable, require it - (Tst::Float { filterable: true }, Tst::Float { filterable: true }) | - // if we expect non-filterable, also accept depth - (Tst::Float { filterable: false }, Tst::Depth) => {} - // if we expect filterable, also accept Float that is defined as - // unfilterable if filterable feature is explicitly enabled (only hit - // if wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is - // enabled) - (Tst::Float { filterable: true }, Tst::Float { .. 
}) if view.format_features.flags.contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {} - _ => { - return Err(Error::InvalidTextureSampleType { - binding, - layout_sample_type: sample_type, - view_format: view.desc.format, - }) - } - } - if view_dimension != view.desc.dimension { - return Err(Error::InvalidTextureDimension { - binding, - layout_dimension: view_dimension, - view_dimension: view.desc.dimension, - }); - } - Ok(( - wgt::TextureUsages::TEXTURE_BINDING, - hal::TextureUses::RESOURCE, - )) - } - wgt::BindingType::StorageTexture { - access, - format, - view_dimension, - } => { - if format != view.desc.format { - return Err(Error::InvalidStorageTextureFormat { - binding, - layout_format: format, - view_format: view.desc.format, - }); - } - if view_dimension != view.desc.dimension { - return Err(Error::InvalidTextureDimension { - binding, - layout_dimension: view_dimension, - view_dimension: view.desc.dimension, - }); - } - - let mip_level_count = view.selector.mips.end - view.selector.mips.start; - if mip_level_count != 1 { - return Err(Error::InvalidStorageTextureMipLevelCount { - binding, - mip_level_count, - }); - } - - let internal_use = match access { - wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE, - wgt::StorageTextureAccess::ReadOnly => { - if !view - .format_features - .flags - .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE) - { - return Err(Error::StorageReadNotSupported(view.desc.format)); - } - hal::TextureUses::STORAGE_READ - } - wgt::StorageTextureAccess::ReadWrite => { - if !view - .format_features - .flags - .contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE) - { - return Err(Error::StorageReadNotSupported(view.desc.format)); - } - - hal::TextureUses::STORAGE_READ_WRITE - } - }; - Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use)) - } - _ => Err(Error::WrongBindingType { - binding, - actual: decl.ty, - expected, - }), - } - } - - fn create_pipeline_layout( - &self, - self_id: id::DeviceId, - desc: &binding_model::PipelineLayoutDescriptor, - bgl_guard: &Storage, id::BindGroupLayoutId>, - ) -> Result, binding_model::CreatePipelineLayoutError> { - use crate::binding_model::CreatePipelineLayoutError as Error; - - let bind_group_layouts_count = desc.bind_group_layouts.len(); - let device_max_bind_groups = self.limits.max_bind_groups as usize; - if bind_group_layouts_count > device_max_bind_groups { - return Err(Error::TooManyGroups { - actual: bind_group_layouts_count, - max: device_max_bind_groups, - }); - } - - if !desc.push_constant_ranges.is_empty() { - self.require_features(wgt::Features::PUSH_CONSTANTS)?; - } - - let mut used_stages = wgt::ShaderStages::empty(); - for (index, pc) in desc.push_constant_ranges.iter().enumerate() { - if pc.stages.intersects(used_stages) { - return Err(Error::MoreThanOnePushConstantRangePerStage { - index, - provided: pc.stages, - intersected: pc.stages & used_stages, - }); - } - used_stages |= pc.stages; - - let device_max_pc_size = self.limits.max_push_constant_size; - if device_max_pc_size < pc.range.end { - return Err(Error::PushConstantRangeTooLarge { - index, - range: pc.range.clone(), - max: device_max_pc_size, - }); - } - - if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 { - return Err(Error::MisalignedPushConstantRange { - index, - bound: pc.range.start, - }); - } - if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 { - return Err(Error::MisalignedPushConstantRange { - index, - bound: pc.range.end, - }); - } - } - - let mut count_validator = 
binding_model::BindingTypeMaxCountValidator::default(); - - // validate total resource counts - for &id in desc.bind_group_layouts.iter() { - let bind_group_layout = bgl_guard - .get(id) - .map_err(|_| Error::InvalidBindGroupLayout(id))?; - count_validator.merge(&bind_group_layout.count_validator); - } - count_validator - .validate(&self.limits) - .map_err(Error::TooManyBindings)?; - - let bgl_vec = desc - .bind_group_layouts - .iter() - .map(|&id| &bgl_guard.get(id).unwrap().raw) - .collect::>(); - let hal_desc = hal::PipelineLayoutDescriptor { - label: desc.label.borrow_option(), - flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE, - bind_group_layouts: &bgl_vec, - push_constant_ranges: desc.push_constant_ranges.as_ref(), - }; - - let raw = unsafe { - self.raw - .create_pipeline_layout(&hal_desc) - .map_err(DeviceError::from)? - }; - - Ok(binding_model::PipelineLayout { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - bind_group_layout_ids: desc - .bind_group_layouts - .iter() - .map(|&id| { - // manually add a dependency to BGL - bgl_guard.get(id).unwrap().multi_ref_count.inc(); - id::Valid(id) - }) - .collect(), - push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(), - }) - } - - //TODO: refactor this. It's the only method of `Device` that registers new objects - // (the pipeline layout). - fn derive_pipeline_layout( - &self, - self_id: id::DeviceId, - implicit_context: Option, - mut derived_group_layouts: ArrayVec, - bgl_guard: &mut Storage, id::BindGroupLayoutId>, - pipeline_layout_guard: &mut Storage, id::PipelineLayoutId>, - ) -> Result { - while derived_group_layouts - .last() - .map_or(false, |map| map.is_empty()) - { - derived_group_layouts.pop(); - } - let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?; - let group_count = derived_group_layouts.len(); - if ids.group_ids.len() < group_count { - log::error!( - "Not enough bind group IDs ({}) specified for the implicit layout ({})", - ids.group_ids.len(), - derived_group_layouts.len() - ); - return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _)); - } - - for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { - match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) { - Some(dedup_id) => { - *bgl_id = dedup_id; - } - None => { - let bgl = self.create_bind_group_layout(self_id, None, map)?; - bgl_guard.force_replace(*bgl_id, bgl); - } - }; - } - - let layout_desc = binding_model::PipelineLayoutDescriptor { - label: None, - bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]), - push_constant_ranges: Cow::Borrowed(&[]), //TODO? - }; - let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?; - pipeline_layout_guard.force_replace(ids.root_id, layout); - Ok(ids.root_id) - } - - fn create_compute_pipeline( - &self, - self_id: id::DeviceId, - desc: &pipeline::ComputePipelineDescriptor, - implicit_context: Option, - hub: &Hub, - token: &mut Token, - ) -> Result, pipeline::CreateComputePipelineError> { - //TODO: only lock mutable if the layout is derived - let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token); - let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token); - - // This has to be done first, or otherwise the IDs may be pointing to entries - // that are not even in the storage. 
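// A minimal standalone sketch of the invariant that the following block
// establishes (the function name and the HashMap-based registry here are
// illustrative assumptions, not the wgpu-core API): every id advertised for
// an implicit pipeline layout must resolve to something, an error entry at
// worst, before any fallible creation work runs, so a later failure can
// never leave a dangling id behind.
fn reserve_implicit_ids(
    registry: &mut std::collections::HashMap<u32, Result<String, &'static str>>,
    root_id: u32,
    group_ids: &[u32],
) {
    // Seed every advertised id with an error placeholder first; a successful
    // creation overwrites it later, while a failure leaves a resolvable error.
    registry.insert(root_id, Err("implicit pipeline layout creation failed"));
    for &id in group_ids {
        registry.insert(id, Err("implicit bind group layout creation failed"));
    }
}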
- if let Some(ref ids) = implicit_context { - pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); - for &bgl_id in ids.group_ids.iter() { - bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); - } - } - - self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?; - - let mut derived_group_layouts = - ArrayVec::::new(); - let mut shader_binding_sizes = FastHashMap::default(); - - let io = validation::StageIo::default(); - let (shader_module_guard, _) = hub.shader_modules.read(&mut token); - - let shader_module = shader_module_guard - .get(desc.stage.module) - .map_err(|_| validation::StageError::InvalidModule)?; - - { - let flag = wgt::ShaderStages::COMPUTE; - let provided_layouts = match desc.layout { - Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( - pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?, - &*bgl_guard, - )), - None => { - for _ in 0..self.limits.max_bind_groups { - derived_group_layouts.push(binding_model::BindEntryMap::default()); - } - None - } - }; - if let Some(ref interface) = shader_module.interface { - let _ = interface.check_stage( - provided_layouts.as_ref().map(|p| p.as_slice()), - &mut derived_group_layouts, - &mut shader_binding_sizes, - &desc.stage.entry_point, - flag, - io, - None, - )?; - } - } - - let pipeline_layout_id = match desc.layout { - Some(id) => id, - None => self.derive_pipeline_layout( - self_id, - implicit_context, - derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, - )?, - }; - let layout = pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; - - let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); - - let pipeline_desc = hal::ComputePipelineDescriptor { - label: desc.label.borrow_option(), - layout: &layout.raw, - stage: hal::ProgrammableStage { - entry_point: desc.stage.entry_point.as_ref(), - module: &shader_module.raw, - }, - }; - - let raw = - unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err( - |err| match err { - hal::PipelineError::Device(error) => { - pipeline::CreateComputePipelineError::Device(error.into()) - } - hal::PipelineError::Linkage(_stages, msg) => { - pipeline::CreateComputePipelineError::Internal(msg) - } - hal::PipelineError::EntryPoint(_stage) => { - pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string()) - } - }, - )?; - - let pipeline = pipeline::ComputePipeline { - raw, - layout_id: Stored { - value: id::Valid(pipeline_layout_id), - ref_count: layout.life_guard.add_ref(), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - late_sized_buffer_groups, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }; - Ok(pipeline) - } - - fn create_render_pipeline( - &self, - self_id: id::DeviceId, - adapter: &Adapter, - desc: &pipeline::RenderPipelineDescriptor, - implicit_context: Option, - hub: &Hub, - token: &mut Token, - ) -> Result, pipeline::CreateRenderPipelineError> { - use wgt::TextureFormatFeatureFlags as Tfff; - - //TODO: only lock mutable if the layout is derived - let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token); - let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token); - - // This has to be done first, or otherwise the IDs may be pointing to entries - // that are not even in the storage. 
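// The same pre-registration pattern is repeated for render pipelines below.
// Once shader reflection has filled one entry map per bind group, the
// derivation step trims trailing empty groups before creating real layouts;
// a compact model of that trimming, with BTreeMap standing in for
// BindEntryMap (an illustrative simplification, not the wgpu-core types):
fn trim_derived_groups(
    max_bind_groups: usize,
    reflect: impl FnOnce(&mut Vec<std::collections::BTreeMap<u32, &'static str>>),
) -> Vec<std::collections::BTreeMap<u32, &'static str>> {
    // Start with one empty entry map per addressable bind group...
    let mut groups = vec![std::collections::BTreeMap::new(); max_bind_groups];
    // ...let reflection populate whichever groups the shader actually uses...
    reflect(&mut groups);
    // ...and drop trailing empty maps, mirroring derive_pipeline_layout above.
    while groups.last().map_or(false, |map| map.is_empty()) {
        groups.pop();
    }
    groups
}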
- if let Some(ref ids) = implicit_context { - pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); - for &bgl_id in ids.group_ids.iter() { - bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); - } - } - - let mut derived_group_layouts = - ArrayVec::::new(); - let mut shader_binding_sizes = FastHashMap::default(); - - let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0); - if num_attachments > hal::MAX_COLOR_ATTACHMENTS { - return Err(pipeline::CreateRenderPipelineError::ColorAttachment( - command::ColorAttachmentError::TooMany { - given: num_attachments, - limit: hal::MAX_COLOR_ATTACHMENTS, - }, - )); - } - - let color_targets = desc - .fragment - .as_ref() - .map_or(&[][..], |fragment| &fragment.targets); - let depth_stencil_state = desc.depth_stencil.as_ref(); - - let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> = - color_targets.iter().filter_map(|x| x.as_ref()).collect(); - if !cts.is_empty() && { - let first = &cts[0]; - cts[1..] - .iter() - .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) - } { - log::info!("Color targets: {:?}", color_targets); - self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?; - } - - let mut io = validation::StageIo::default(); - let mut validated_stages = wgt::ShaderStages::empty(); - - let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len()); - let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len()); - let mut total_attributes = 0; - for (i, vb_state) in desc.vertex.buffers.iter().enumerate() { - vertex_steps.push(pipeline::VertexStep { - stride: vb_state.array_stride, - mode: vb_state.step_mode, - }); - if vb_state.attributes.is_empty() { - continue; - } - if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 { - return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge { - index: i as u32, - given: vb_state.array_stride as u32, - limit: self.limits.max_vertex_buffer_array_stride, - }); - } - if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 { - return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride { - index: i as u32, - stride: vb_state.array_stride, - }); - } - vertex_buffers.push(hal::VertexBufferLayout { - array_stride: vb_state.array_stride, - step_mode: vb_state.step_mode, - attributes: vb_state.attributes.as_ref(), - }); - - for attribute in vb_state.attributes.iter() { - if attribute.offset >= 0x10000000 { - return Err( - pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset { - location: attribute.shader_location, - offset: attribute.offset, - }, - ); - } - - if let wgt::VertexFormat::Float64 - | wgt::VertexFormat::Float64x2 - | wgt::VertexFormat::Float64x3 - | wgt::VertexFormat::Float64x4 = attribute.format - { - self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?; - } - - let previous = io.insert( - attribute.shader_location, - validation::InterfaceVar::vertex_attribute(attribute.format), - ); - - if previous.is_some() { - return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash( - attribute.shader_location, - )); - } - } - total_attributes += vb_state.attributes.len(); - } - - if vertex_buffers.len() > self.limits.max_vertex_buffers as usize { - return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers { - given: vertex_buffers.len() as u32, - limit: self.limits.max_vertex_buffers, - }); - } - if total_attributes > self.limits.max_vertex_attributes as usize { - return Err( - 
pipeline::CreateRenderPipelineError::TooManyVertexAttributes { - given: total_attributes as u32, - limit: self.limits.max_vertex_attributes, - }, - ); - } - - if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() { - return Err( - pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology { - strip_index_format: desc.primitive.strip_index_format, - topology: desc.primitive.topology, - }, - ); - } - - if desc.primitive.unclipped_depth { - self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?; - } - - if desc.primitive.polygon_mode == wgt::PolygonMode::Line { - self.require_features(wgt::Features::POLYGON_MODE_LINE)?; - } - if desc.primitive.polygon_mode == wgt::PolygonMode::Point { - self.require_features(wgt::Features::POLYGON_MODE_POINT)?; - } - - if desc.primitive.conservative { - self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?; - } - - if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill { - return Err( - pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode, - ); - } - - for (i, cs) in color_targets.iter().enumerate() { - if let Some(cs) = cs.as_ref() { - let error = loop { - if cs.write_mask.contains_invalid_bits() { - break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask)); - } - - let format_features = self.describe_format_features(adapter, cs.format)?; - if !format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - { - break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format)); - } - let blendable = format_features.flags.contains(Tfff::BLENDABLE); - let filterable = format_features.flags.contains(Tfff::FILTERABLE); - let adapter_specific = self - .features - .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); - // according to WebGPU specifications the texture needs to be - // [`TextureFormatFeatureFlags::FILTERABLE`] if blending is set - use - // [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] to elude - // this limitation - if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) { - break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format)); - } - if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) { - break Some(pipeline::ColorStateError::FormatNotColor(cs.format)); - } - if desc.multisample.count > 1 - && !format_features - .flags - .sample_count_supported(desc.multisample.count) - { - break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format)); - } - - break None; - }; - if let Some(e) = error { - return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e)); - } - } - } - - if let Some(ds) = depth_stencil_state { - let error = loop { - let format_features = self.describe_format_features(adapter, ds.format)?; - if !format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - { - break Some(pipeline::DepthStencilStateError::FormatNotRenderable( - ds.format, - )); - } - - let aspect = hal::FormatAspects::from(ds.format); - if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) { - break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format)); - } - if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) { - break Some(pipeline::DepthStencilStateError::FormatNotStencil( - ds.format, - )); - } - if desc.multisample.count > 1 - && !format_features - .flags - .sample_count_supported(desc.multisample.count) - { - break 
Some(pipeline::DepthStencilStateError::FormatNotMultisampled( - ds.format, - )); - } - - break None; - }; - if let Some(e) = error { - return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e)); - } - - if ds.bias.clamp != 0.0 { - self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?; - } - } - - if desc.layout.is_none() { - for _ in 0..self.limits.max_bind_groups { - derived_group_layouts.push(binding_model::BindEntryMap::default()); - } - } - - let samples = { - let sc = desc.multisample.count; - if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) { - return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc)); - } - sc - }; - - let (shader_module_guard, _) = hub.shader_modules.read(&mut token); - - let vertex_stage = { - let stage = &desc.vertex.stage; - let flag = wgt::ShaderStages::VERTEX; - - let shader_module = shader_module_guard.get(stage.module).map_err(|_| { - pipeline::CreateRenderPipelineError::Stage { - stage: flag, - error: validation::StageError::InvalidModule, - } - })?; - - let provided_layouts = match desc.layout { - Some(pipeline_layout_id) => { - let pipeline_layout = pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; - Some(Device::get_introspection_bind_group_layouts( - pipeline_layout, - &*bgl_guard, - )) - } - None => None, - }; - - if let Some(ref interface) = shader_module.interface { - io = interface - .check_stage( - provided_layouts.as_ref().map(|p| p.as_slice()), - &mut derived_group_layouts, - &mut shader_binding_sizes, - &stage.entry_point, - flag, - io, - desc.depth_stencil.as_ref().map(|d| d.depth_compare), - ) - .map_err(|error| pipeline::CreateRenderPipelineError::Stage { - stage: flag, - error, - })?; - validated_stages |= flag; - } - - hal::ProgrammableStage { - module: &shader_module.raw, - entry_point: stage.entry_point.as_ref(), - } - }; - - let fragment_stage = match desc.fragment { - Some(ref fragment) => { - let flag = wgt::ShaderStages::FRAGMENT; - - let shader_module = - shader_module_guard - .get(fragment.stage.module) - .map_err(|_| pipeline::CreateRenderPipelineError::Stage { - stage: flag, - error: validation::StageError::InvalidModule, - })?; - - let provided_layouts = match desc.layout { - Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( - pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?, - &*bgl_guard, - )), - None => None, - }; - - if validated_stages == wgt::ShaderStages::VERTEX { - if let Some(ref interface) = shader_module.interface { - io = interface - .check_stage( - provided_layouts.as_ref().map(|p| p.as_slice()), - &mut derived_group_layouts, - &mut shader_binding_sizes, - &fragment.stage.entry_point, - flag, - io, - desc.depth_stencil.as_ref().map(|d| d.depth_compare), - ) - .map_err(|error| pipeline::CreateRenderPipelineError::Stage { - stage: flag, - error, - })?; - validated_stages |= flag; - } - } - - Some(hal::ProgrammableStage { - module: &shader_module.raw, - entry_point: fragment.stage.entry_point.as_ref(), - }) - } - None => None, - }; - - if validated_stages.contains(wgt::ShaderStages::FRAGMENT) { - for (i, output) in io.iter() { - match color_targets.get(*i as usize) { - Some(&Some(ref state)) => { - validation::check_texture_format(state.format, &output.ty).map_err( - |pipeline| { - pipeline::CreateRenderPipelineError::ColorState( - *i as u8, - pipeline::ColorStateError::IncompatibleFormat { - pipeline, - shader: 
output.ty, - }, - ) - }, - )?; - } - _ => { - log::info!( - "The fragment stage {:?} output @location({}) values are ignored", - fragment_stage - .as_ref() - .map_or("", |stage| stage.entry_point), - i - ); - } - } - } - } - let last_stage = match desc.fragment { - Some(_) => wgt::ShaderStages::FRAGMENT, - None => wgt::ShaderStages::VERTEX, - }; - if desc.layout.is_none() && !validated_stages.contains(last_stage) { - return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into()); - } - - let pipeline_layout_id = match desc.layout { - Some(id) => id, - None => self.derive_pipeline_layout( - self_id, - implicit_context, - derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, - )?, - }; - let layout = pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; - - // Multiview is only supported if the feature is enabled - if desc.multiview.is_some() { - self.require_features(wgt::Features::MULTIVIEW)?; - } - - if !self - .downlevel - .flags - .contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED) - { - for (binding, size) in shader_binding_sizes.iter() { - if size.get() % 16 != 0 { - return Err(pipeline::CreateRenderPipelineError::UnalignedShader { - binding: binding.binding, - group: binding.group, - size: size.get(), - }); - } - } - } - - let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); - - let pipeline_desc = hal::RenderPipelineDescriptor { - label: desc.label.borrow_option(), - layout: &layout.raw, - vertex_buffers: &vertex_buffers, - vertex_stage, - primitive: desc.primitive, - depth_stencil: desc.depth_stencil.clone(), - multisample: desc.multisample, - fragment_stage, - color_targets, - multiview: desc.multiview, - }; - let raw = - unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err( - |err| match err { - hal::PipelineError::Device(error) => { - pipeline::CreateRenderPipelineError::Device(error.into()) - } - hal::PipelineError::Linkage(stage, msg) => { - pipeline::CreateRenderPipelineError::Internal { stage, error: msg } - } - hal::PipelineError::EntryPoint(stage) => { - pipeline::CreateRenderPipelineError::Internal { - stage: hal::auxil::map_naga_stage(stage), - error: EP_FAILURE.to_string(), - } - } - }, - )?; - - let pass_context = RenderPassContext { - attachments: AttachmentData { - colors: color_targets - .iter() - .map(|state| state.as_ref().map(|s| s.format)) - .collect(), - resolves: ArrayVec::new(), - depth_stencil: depth_stencil_state.as_ref().map(|state| state.format), - }, - sample_count: samples, - multiview: desc.multiview, - }; - - let mut flags = pipeline::PipelineFlags::empty(); - for state in color_targets.iter().filter_map(|s| s.as_ref()) { - if let Some(ref bs) = state.blend { - if bs.color.uses_constant() | bs.alpha.uses_constant() { - flags |= pipeline::PipelineFlags::BLEND_CONSTANT; - } - } - } - if let Some(ds) = depth_stencil_state.as_ref() { - if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() { - flags |= pipeline::PipelineFlags::STENCIL_REFERENCE; - } - if !ds.is_depth_read_only() { - flags |= pipeline::PipelineFlags::WRITES_DEPTH; - } - if !ds.is_stencil_read_only(desc.primitive.cull_mode) { - flags |= pipeline::PipelineFlags::WRITES_STENCIL; - } - } - - let pipeline = pipeline::RenderPipeline { - raw, - layout_id: Stored { - value: id::Valid(pipeline_layout_id), - ref_count: layout.life_guard.add_ref(), - }, - device_id: Stored { - value: id::Valid(self_id), - 
ref_count: self.life_guard.add_ref(), - }, - pass_context, - flags, - strip_index_format: desc.primitive.strip_index_format, - vertex_steps, - late_sized_buffer_groups, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - }; - Ok(pipeline) - } - - fn describe_format_features( - &self, - adapter: &Adapter, - format: TextureFormat, - ) -> Result { - self.require_features(format.required_features())?; - - let using_device_features = self - .features - .contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES); - // If we're running downlevel, we need to manually ask the backend what - // we can use as we can't trust WebGPU. - let downlevel = !self.downlevel.is_webgpu_compliant(); - - if using_device_features || downlevel { - Ok(adapter.get_texture_format_features(format)) - } else { - Ok(format.guaranteed_format_features()) - } - } - - fn wait_for_submit( - &self, - submission_index: SubmissionIndex, - token: &mut Token, - ) -> Result<(), WaitIdleError> { - let last_done_index = unsafe { - self.raw - .get_fence_value(&self.fence) - .map_err(DeviceError::from)? - }; - if last_done_index < submission_index { - log::info!("Waiting for submission {:?}", submission_index); - unsafe { - self.raw - .wait(&self.fence, submission_index, !0) - .map_err(DeviceError::from)? - }; - let closures = self - .lock_life(token) - .triage_submissions(submission_index, &self.command_allocator); - assert!( - closures.is_empty(), - "wait_for_submit is not expected to work with closures" - ); - } - Ok(()) - } - - fn create_query_set( - &self, - self_id: id::DeviceId, - desc: &resource::QuerySetDescriptor, - ) -> Result, resource::CreateQuerySetError> { - use resource::CreateQuerySetError as Error; - - match desc.ty { - wgt::QueryType::Occlusion => {} - wgt::QueryType::Timestamp => { - self.require_features(wgt::Features::TIMESTAMP_QUERY)?; - } - wgt::QueryType::PipelineStatistics(..) => { - self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?; - } - } - - if desc.count == 0 { - return Err(Error::ZeroCount); - } - - if desc.count > wgt::QUERY_SET_MAX_QUERIES { - return Err(Error::TooManyQueries { - count: desc.count, - maximum: wgt::QUERY_SET_MAX_QUERIES, - }); - } - - let hal_desc = desc.map_label(super::LabelHelpers::borrow_option); - Ok(resource::QuerySet { - raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(""), - desc: desc.map_label(|_| ()), - }) - } -} - -impl Device { - pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer) { - if let Some(raw) = buffer.raw { - unsafe { - self.raw.destroy_buffer(raw); - } - } - } - - pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.into_baked(); - unsafe { - baked.encoder.reset_all(baked.list.into_iter()); - } - unsafe { - self.raw.destroy_command_encoder(baked.encoder); - } - } - - /// Wait for idle and remove resources that we can, before we die. 
- pub(crate) fn prepare_to_die(&mut self) { - self.pending_writes.deactivate(); - let mut life_tracker = self.life_tracker.lock(); - let current_index = self.active_submission_index; - if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } { - log::error!("failed to wait for the device: {:?}", error); - } - let _ = life_tracker.triage_submissions(current_index, &self.command_allocator); - life_tracker.cleanup(&self.raw); - #[cfg(feature = "trace")] - { - self.trace = None; - } - } - - pub(crate) fn dispose(self) { - self.pending_writes.dispose(&self.raw); - self.command_allocator.into_inner().dispose(&self.raw); - unsafe { - self.raw.destroy_buffer(self.zero_buffer); - self.raw.destroy_fence(self.fence); - self.raw.exit(self.queue); - } - } -} - -impl crate::hub::Resource for Device { - const TYPE: &'static str = "Device"; - - fn life_guard(&self) -> &LifeGuard { - &self.life_guard - } -} - -#[derive(Clone, Debug, Error)] -#[error("Device is invalid")] -pub struct InvalidDevice; - -#[derive(Clone, Debug, Error)] -pub enum DeviceError { - #[error("Parent device is invalid")] - Invalid, - #[error("Parent device is lost")] - Lost, - #[error("Not enough memory left")] - OutOfMemory, -} - -impl From for DeviceError { - fn from(error: hal::DeviceError) -> Self { - match error { - hal::DeviceError::Lost => DeviceError::Lost, - hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory, - } - } -} - -#[derive(Clone, Debug, Error)] -#[error("Features {0:?} are required but not enabled on the device")] -pub struct MissingFeatures(pub wgt::Features); - -#[derive(Clone, Debug, Error)] -#[error( - "Downlevel flags {0:?} are required but not supported on the device.\n{}", - DOWNLEVEL_ERROR_MESSAGE -)] -pub struct MissingDownlevelFlags(pub wgt::DownlevelFlags); - -#[derive(Clone, Debug)] -#[cfg_attr(feature = "trace", derive(serde::Serialize))] -#[cfg_attr(feature = "replay", derive(serde::Deserialize))] -pub struct ImplicitPipelineContext { - pub root_id: id::PipelineLayoutId, - pub group_ids: ArrayVec, -} - -pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> { - pub root_id: Input, - pub group_ids: &'a [Input], -} - -impl ImplicitPipelineIds<'_, G> { - fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { - ImplicitPipelineContext { - root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(), - group_ids: self - .group_ids - .iter() - .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id()) - .collect(), - } - } -} - -impl Global { - pub fn adapter_is_surface_supported( - &self, - adapter_id: id::AdapterId, - surface_id: id::SurfaceId, - ) -> Result { - let hub = A::hub(self); - let mut token = Token::root(); - - let (surface_guard, mut token) = self.surfaces.read(&mut token); - let (adapter_guard, mut _token) = hub.adapters.read(&mut token); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; - Ok(adapter.is_surface_supported(surface)) - } - - pub fn surface_get_capabilities( - &self, - surface_id: id::SurfaceId, - adapter_id: id::AdapterId, - ) -> Result { - profiling::scope!("Surface::get_capabilities"); - self.fetch_adapter_and_surface::(surface_id, adapter_id, |adapter, surface| { - let mut hal_caps = surface.get_capabilities(adapter)?; - - hal_caps.formats.sort_by_key(|f| !f.is_srgb()); - - Ok(wgt::SurfaceCapabilities { - formats: hal_caps.formats, 
- present_modes: hal_caps.present_modes, - alpha_modes: hal_caps.composite_alpha_modes, - }) - }) - } - - fn fetch_adapter_and_surface< - A: HalApi, - F: FnOnce(&Adapter, &Surface) -> Result, - B, - >( - &self, - surface_id: id::SurfaceId, - adapter_id: id::AdapterId, - get_supported_callback: F, - ) -> Result { - let hub = A::hub(self); - let mut token = Token::root(); - - let (surface_guard, mut token) = self.surfaces.read(&mut token); - let (adapter_guard, mut _token) = hub.adapters.read(&mut token); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; - - get_supported_callback(adapter, surface) - } - - pub fn device_features( - &self, - device_id: id::DeviceId, - ) -> Result { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - - Ok(device.features) - } - - pub fn device_limits( - &self, - device_id: id::DeviceId, - ) -> Result { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - - Ok(device.limits.clone()) - } - - pub fn device_downlevel_properties( - &self, - device_id: id::DeviceId, - ) -> Result { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - - Ok(device.downlevel.clone()) - } - - pub fn device_create_buffer( - &self, - device_id: id::DeviceId, - desc: &resource::BufferDescriptor, - id_in: Input, - ) -> (id::BufferId, Option) { - profiling::scope!("Device::create_buffer"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.buffers.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut desc = desc.clone(); - let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); - if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - desc.usage |= wgt::BufferUsages::COPY_DST; - } - trace - .lock() - .add(trace::Action::CreateBuffer(fid.id(), desc)); - } - - let mut buffer = match device.create_buffer(device_id, desc, false) { - Ok(buffer) => buffer, - Err(e) => break e, - }; - let ref_count = buffer.life_guard.add_ref(); - - let buffer_use = if !desc.mapped_at_creation { - hal::BufferUses::empty() - } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // buffer is mappable, so we are just doing that at start - let map_size = buffer.size; - let ptr = if map_size == 0 { - std::ptr::NonNull::dangling() - } else { - match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) { - Ok(ptr) => ptr, - Err(e) => { - let raw = buffer.raw.unwrap(); - device.lock_life(&mut token).schedule_resource_destruction( - queue::TempResource::Buffer(raw), - !0, - ); - break e.into(); - } - } - }; - buffer.map_state = resource::BufferMapState::Active { - ptr, - range: 0..map_size, - host: HostMap::Write, - }; - hal::BufferUses::MAP_WRITE - } else { - // buffer needs staging area for 
initialization only - let stage_desc = wgt::BufferDescriptor { - label: Some(Cow::Borrowed( - "(wgpu internal) initializing unmappable buffer", - )), - size: desc.size, - usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, - mapped_at_creation: false, - }; - let mut stage = match device.create_buffer(device_id, &stage_desc, true) { - Ok(stage) => stage, - Err(e) => { - let raw = buffer.raw.unwrap(); - device - .lock_life(&mut token) - .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); - break e; - } - }; - let stage_buffer = stage.raw.unwrap(); - let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } { - Ok(mapping) => mapping, - Err(e) => { - let raw = buffer.raw.unwrap(); - let mut life_lock = device.lock_life(&mut token); - life_lock - .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); - life_lock.schedule_resource_destruction( - queue::TempResource::Buffer(stage_buffer), - !0, - ); - break DeviceError::from(e).into(); - } - }; - - assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0); - // Zero initialize memory and then mark both staging and buffer as initialized - // (it's guaranteed that this is the case by the time the buffer is usable) - unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) }; - buffer.initialization_status.drain(0..buffer.size); - stage.initialization_status.drain(0..buffer.size); - - buffer.map_state = resource::BufferMapState::Init { - ptr: mapping.ptr, - needs_flush: !mapping.is_coherent, - stage_buffer, - }; - hal::BufferUses::COPY_DST - }; - - let id = fid.assign(buffer, &mut token); - log::info!("Created buffer {:?} with {:?}", id, desc); - - device - .trackers - .lock() - .buffers - .insert_single(id, ref_count, buffer_use); - - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - /// Assign `id_in` an error with the given `label`. - /// - /// Ensure that future attempts to use `id_in` as a buffer ID will propagate - /// the error, following the WebGPU ["contagious invalidity"] style. - /// - /// Firefox uses this function to comply strictly with the WebGPU spec, - /// which requires [`GPUBufferDescriptor`] validation to be generated on the - /// Device timeline and leave the newly created [`GPUBuffer`] invalid. - /// - /// Ideally, we would simply let [`device_create_buffer`] take care of all - /// of this, but some errors must be detected before we can even construct a - /// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API - /// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL - /// `unsigned long` value, but we can't construct a - /// [`wgpu_types::BufferUsages`] value from values with unassigned bits - /// set. This means we must validate `usage` before we can call - /// `device_create_buffer`. - /// - /// When that validation fails, we must arrange for the buffer id to be - /// considered invalid. This method provides the means to do so. 
- /// - /// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity - /// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor - /// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer - /// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor - /// [`device_create_buffer`]: Global::device_create_buffer - /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage - /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages - pub fn create_buffer_error(&self, id_in: Input, label: Label) { - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.buffers.prepare(id_in); - - let (_, mut token) = hub.devices.read(&mut token); - fid.assign_error(label.borrow_or_default(), &mut token); - } - - /// Assign `id_in` an error with the given `label`. - /// - /// See `create_buffer_error` for more context and explaination. - pub fn create_texture_error(&self, id_in: Input, label: Label) { - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - - let (_, mut token) = hub.devices.read(&mut token); - fid.assign_error(label.borrow_or_default(), &mut token); - } - - #[cfg(feature = "replay")] - pub fn device_wait_for_buffer( - &self, - device_id: id::DeviceId, - buffer_id: id::BufferId, - ) -> Result<(), WaitIdleError> { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let last_submission = { - let (buffer_guard, _) = hub.buffers.write(&mut token); - match buffer_guard.get(buffer_id) { - Ok(buffer) => buffer.life_guard.life_count(), - Err(_) => return Ok(()), - } - }; - - device_guard - .get(device_id) - .map_err(|_| DeviceError::Invalid)? - .wait_for_submit(last_submission, &mut token) - } - - #[doc(hidden)] - pub fn device_set_buffer_sub_data( - &self, - device_id: id::DeviceId, - buffer_id: id::BufferId, - offset: BufferAddress, - data: &[u8], - ) -> BufferAccessResult { - profiling::scope!("Device::set_buffer_sub_data"); - - let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let device = device_guard - .get(device_id) - .map_err(|_| DeviceError::Invalid)?; - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| BufferAccessError::Invalid)?; - check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?; - //assert!(buffer isn't used by the GPU); - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data_path = trace.make_binary("bin", data); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data: data_path, - range: offset..offset + data.len() as BufferAddress, - queued: false, - }); - } - - let raw_buf = buffer.raw.as_ref().unwrap(); - unsafe { - let mapping = device - .raw - .map_buffer(raw_buf, offset..offset + data.len() as u64) - .map_err(DeviceError::from)?; - ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); - if !mapping.is_coherent { - device - .raw - .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64)); - } - device - .raw - .unmap_buffer(raw_buf) - .map_err(DeviceError::from)?; - } - - Ok(()) - } - - #[doc(hidden)] - pub fn device_get_buffer_sub_data( - &self, - device_id: id::DeviceId, - buffer_id: id::BufferId, - offset: BufferAddress, - data: &mut [u8], - ) -> BufferAccessResult { - profiling::scope!("Device::get_buffer_sub_data"); - - 
let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let device = device_guard - .get(device_id) - .map_err(|_| DeviceError::Invalid)?; - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| BufferAccessError::Invalid)?; - check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?; - //assert!(buffer isn't used by the GPU); - - let raw_buf = buffer.raw.as_ref().unwrap(); - unsafe { - let mapping = device - .raw - .map_buffer(raw_buf, offset..offset + data.len() as u64) - .map_err(DeviceError::from)?; - if !mapping.is_coherent { - device.raw.invalidate_mapped_ranges( - raw_buf, - iter::once(offset..offset + data.len() as u64), - ); - } - ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len()); - device - .raw - .unmap_buffer(raw_buf) - .map_err(DeviceError::from)?; - } - - Ok(()) - } - - pub fn buffer_label(&self, id: id::BufferId) -> String { - A::hub(self).buffers.label_for_resource(id) - } - - pub fn buffer_destroy( - &self, - buffer_id: id::BufferId, - ) -> Result<(), resource::DestroyError> { - profiling::scope!("Buffer::destroy"); - - let map_closure; - // Restrict the locks to this scope. - { - let hub = A::hub(self); - let mut token = Token::root(); - - //TODO: lock pending writes separately, keep the device read-only - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - log::info!("Buffer {:?} is destroyed", buffer_id); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; - - let device = &mut device_guard[buffer.device_id.value]; - - map_closure = match &buffer.map_state { - &BufferMapState::Waiting(..) // To get the proper callback behavior. - | &BufferMapState::Init { .. } - | &BufferMapState::Active { .. 
} - => { - self.buffer_unmap_inner(buffer_id, buffer, device) - .unwrap_or(None) - } - _ => None, - }; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeBuffer(buffer_id)); - } - - let raw = buffer - .raw - .take() - .ok_or(resource::DestroyError::AlreadyDestroyed)?; - let temp = queue::TempResource::Buffer(raw); - - if device.pending_writes.dst_buffers.contains(&buffer_id) { - device.pending_writes.temp_resources.push(temp); - } else { - let last_submit_index = buffer.life_guard.life_count(); - drop(buffer_guard); - device - .lock_life(&mut token) - .schedule_resource_destruction(temp, last_submit_index); - } - } - - // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = map_closure { - operation.callback.call(status); - } - - Ok(()) - } - - pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { - profiling::scope!("Buffer::drop"); - log::debug!("buffer {:?} is dropped", buffer_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let (ref_count, last_submit_index, device_id) = { - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - match buffer_guard.get_mut(buffer_id) { - Ok(buffer) => { - let ref_count = buffer.life_guard.ref_count.take().unwrap(); - let last_submit_index = buffer.life_guard.life_count(); - (ref_count, last_submit_index, buffer.device_id.value) - } - Err(InvalidId) => { - hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - { - let mut life_lock = device.lock_life(&mut token); - if device.pending_writes.dst_buffers.contains(&buffer_id) { - life_lock.future_suspected_buffers.push(Stored { - value: id::Valid(buffer_id), - ref_count, - }); - } else { - drop(ref_count); - life_lock - .suspected_resources - .buffers - .push(id::Valid(buffer_id)); - } - } - - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), - } - } - } - - pub fn device_create_texture( - &self, - device_id: id::DeviceId, - desc: &resource::TextureDescriptor, - id_in: Input, - ) -> (id::TextureId, Option) { - profiling::scope!("Device::create_texture"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); - } - - let adapter = &adapter_guard[device.adapter_id.value]; - let texture = match device.create_texture(device_id, adapter, desc) { - Ok(texture) => texture, - Err(error) => break error, - }; - let ref_count = texture.life_guard.add_ref(); - - let id = fid.assign(texture, &mut token); - log::info!("Created texture {:?} with {:?}", id, desc); - - device.trackers.lock().textures.insert_single( - id.0, - ref_count, - hal::TextureUses::UNINITIALIZED, - ); - - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - /// # Safety - /// - /// - `hal_texture` must be 
created from `device_id` corresponding raw handle. - /// - `hal_texture` must be created respecting `desc` - /// - `hal_texture` must be initialized - pub unsafe fn create_texture_from_hal( - &self, - hal_texture: A::Texture, - device_id: id::DeviceId, - desc: &resource::TextureDescriptor, - id_in: Input, - ) -> (id::TextureId, Option) { - profiling::scope!("Device::create_texture"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - - // NB: Any change done through the raw texture handle will not be - // recorded in the replay - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); - } - - let adapter = &adapter_guard[device.adapter_id.value]; - - let format_features = match device - .describe_format_features(adapter, desc.format) - .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error)) - { - Ok(features) => features, - Err(error) => break error, - }; - - let mut texture = device.create_texture_from_hal( - hal_texture, - conv::map_texture_usage(desc.usage, desc.format.into()), - device_id, - desc, - format_features, - resource::TextureClearMode::None, - ); - if desc.usage.contains(wgt::TextureUsages::COPY_DST) { - texture.hal_usage |= hal::TextureUses::COPY_DST; - } - - texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0); - - let ref_count = texture.life_guard.add_ref(); - - let id = fid.assign(texture, &mut token); - log::info!("Created texture {:?} with {:?}", id, desc); - - device.trackers.lock().textures.insert_single( - id.0, - ref_count, - hal::TextureUses::UNINITIALIZED, - ); - - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn texture_label(&self, id: id::TextureId) -> String { - A::hub(self).textures.label_for_resource(id) - } - - pub fn texture_destroy( - &self, - texture_id: id::TextureId, - ) -> Result<(), resource::DestroyError> { - profiling::scope!("Texture::destroy"); - - let hub = A::hub(self); - let mut token = Token::root(); - - //TODO: lock pending writes separately, keep the device read-only - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - log::info!("Buffer {:?} is destroyed", texture_id); - let (mut texture_guard, _) = hub.textures.write(&mut token); - let texture = texture_guard - .get_mut(texture_id) - .map_err(|_| resource::DestroyError::Invalid)?; - - let device = &mut device_guard[texture.device_id.value]; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeTexture(texture_id)); - } - - let last_submit_index = texture.life_guard.life_count(); - - let clear_views = - match std::mem::replace(&mut texture.clear_mode, resource::TextureClearMode::None) { - resource::TextureClearMode::BufferCopy => SmallVec::new(), - resource::TextureClearMode::RenderPass { clear_views, .. 
} => clear_views, - resource::TextureClearMode::None => SmallVec::new(), - }; - - match texture.inner { - resource::TextureInner::Native { ref mut raw } => { - let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?; - let temp = queue::TempResource::Texture(raw, clear_views); - - if device.pending_writes.dst_textures.contains(&texture_id) { - device.pending_writes.temp_resources.push(temp); - } else { - drop(texture_guard); - device - .lock_life(&mut token) - .schedule_resource_destruction(temp, last_submit_index); - } - } - resource::TextureInner::Surface { .. } => { - for clear_view in clear_views { - unsafe { - device.raw.destroy_texture_view(clear_view); - } - } - // TODO? - } - } - - Ok(()) - } - - pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { - profiling::scope!("Texture::drop"); - log::debug!("texture {:?} is dropped", texture_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let (ref_count, last_submit_index, device_id) = { - let (mut texture_guard, _) = hub.textures.write(&mut token); - match texture_guard.get_mut(texture_id) { - Ok(texture) => { - let ref_count = texture.life_guard.ref_count.take().unwrap(); - let last_submit_index = texture.life_guard.life_count(); - (ref_count, last_submit_index, texture.device_id.value) - } - Err(InvalidId) => { - hub.textures - .unregister_locked(texture_id, &mut *texture_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - { - let mut life_lock = device.lock_life(&mut token); - if device.pending_writes.dst_textures.contains(&texture_id) { - life_lock.future_suspected_textures.push(Stored { - value: id::Valid(texture_id), - ref_count, - }); - } else { - drop(ref_count); - life_lock - .suspected_resources - .textures - .push(id::Valid(texture_id)); - } - } - - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), - } - } - } - - pub fn texture_create_view( - &self, - texture_id: id::TextureId, - desc: &resource::TextureViewDescriptor, - id_in: Input, - ) -> (id::TextureViewId, Option) { - profiling::scope!("Texture::create_view"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.texture_views.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let error = loop { - let texture = match texture_guard.get(texture_id) { - Ok(texture) => texture, - Err(_) => break resource::CreateTextureViewError::InvalidTexture, - }; - let device = &device_guard[texture.device_id.value]; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateTextureView { - id: fid.id(), - parent_id: texture_id, - desc: desc.clone(), - }); - } - - let view = match device.create_texture_view(texture, texture_id, desc) { - Ok(view) => view, - Err(e) => break e, - }; - let ref_count = view.life_guard.add_ref(); - let id = fid.assign(view, &mut token); - - device.trackers.lock().views.insert_single(id, ref_count); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn texture_view_label(&self, id: id::TextureViewId) -> String { - A::hub(self).texture_views.label_for_resource(id) - } - - pub fn texture_view_drop( - &self, - texture_view_id: id::TextureViewId, - wait: 
bool, - ) -> Result<(), resource::TextureViewDestroyError> { - profiling::scope!("TextureView::drop"); - log::debug!("texture view {:?} is dropped", texture_view_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let (last_submit_index, device_id) = { - let (mut texture_view_guard, _) = hub.texture_views.write(&mut token); - - match texture_view_guard.get_mut(texture_view_id) { - Ok(view) => { - let _ref_count = view.life_guard.ref_count.take(); - let last_submit_index = view.life_guard.life_count(); - (last_submit_index, view.device_id.value) - } - Err(InvalidId) => { - hub.texture_views - .unregister_locked(texture_view_id, &mut *texture_view_guard); - return Ok(()); - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - device - .lock_life(&mut token) - .suspected_resources - .texture_views - .push(id::Valid(texture_view_id)); - - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!( - "Failed to wait for texture view {:?}: {:?}", - texture_view_id, - e - ), - } - } - Ok(()) - } - - pub fn device_create_sampler( - &self, - device_id: id::DeviceId, - desc: &resource::SamplerDescriptor, - id_in: Input, - ) -> (id::SamplerId, Option) { - profiling::scope!("Device::create_sampler"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.samplers.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateSampler(fid.id(), desc.clone())); - } - - let sampler = match device.create_sampler(device_id, desc) { - Ok(sampler) => sampler, - Err(e) => break e, - }; - let ref_count = sampler.life_guard.add_ref(); - let id = fid.assign(sampler, &mut token); - - device.trackers.lock().samplers.insert_single(id, ref_count); - - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn sampler_label(&self, id: id::SamplerId) -> String { - A::hub(self).samplers.label_for_resource(id) - } - - pub fn sampler_drop(&self, sampler_id: id::SamplerId) { - profiling::scope!("Sampler::drop"); - log::debug!("sampler {:?} is dropped", sampler_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut sampler_guard, _) = hub.samplers.write(&mut token); - match sampler_guard.get_mut(sampler_id) { - Ok(sampler) => { - sampler.life_guard.ref_count.take(); - sampler.device_id.value - } - Err(InvalidId) => { - hub.samplers - .unregister_locked(sampler_id, &mut *sampler_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .samplers - .push(id::Valid(sampler_id)); - } - - pub fn device_create_bind_group_layout( - &self, - device_id: id::DeviceId, - desc: &binding_model::BindGroupLayoutDescriptor, - id_in: Input, - ) -> ( - id::BindGroupLayoutId, - Option, - ) { - profiling::scope!("Device::create_bind_group_layout"); - - let mut token = Token::root(); - let hub = A::hub(self); - let fid = hub.bind_group_layouts.prepare(id_in); - - let error = 'outer: loop { - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = match 
device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); - } - - let mut entry_map = FastHashMap::default(); - for entry in desc.entries.iter() { - if entry.binding > device.limits.max_bindings_per_bind_group { - break 'outer binding_model::CreateBindGroupLayoutError::InvalidBindingIndex { - binding: entry.binding, - maximum: device.limits.max_bindings_per_bind_group, - }; - } - if entry_map.insert(entry.binding, *entry).is_some() { - break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding( - entry.binding, - ); - } - } - - // If there is an equivalent BGL, just bump the refcount and return it. - // This is only applicable for identity filters that are generating new IDs, - // so their inputs are `PhantomData` of size 0. - if mem::size_of::>() == 0 { - let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); - if let Some(id) = - Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) - { - return (id, None); - } - } - - let layout = match device.create_bind_group_layout( - device_id, - desc.label.borrow_option(), - entry_map, - ) { - Ok(layout) => layout, - Err(e) => break e, - }; - - let id = fid.assign(layout, &mut token); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn bind_group_layout_label(&self, id: id::BindGroupLayoutId) -> String { - A::hub(self).bind_group_layouts.label_for_resource(id) - } - - pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { - profiling::scope!("BindGroupLayout::drop"); - log::debug!("bind group layout {:?} is dropped", bind_group_layout_id); - - let hub = A::hub(self); - let mut token = Token::root(); - let device_id = { - let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token); - match bind_group_layout_guard.get_mut(bind_group_layout_id) { - Ok(layout) => layout.device_id.value, - Err(InvalidId) => { - hub.bind_group_layouts - .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .bind_group_layouts - .push(id::Valid(bind_group_layout_id)); - } - - pub fn device_create_pipeline_layout( - &self, - device_id: id::DeviceId, - desc: &binding_model::PipelineLayoutDescriptor, - id_in: Input, - ) -> ( - id::PipelineLayoutId, - Option, - ) { - profiling::scope!("Device::create_pipeline_layout"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.pipeline_layouts.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); - } - - let layout = { - let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); - match device.create_pipeline_layout(device_id, desc, &*bgl_guard) { - Ok(layout) => layout, - Err(e) => break e, - } - }; - - let id = fid.assign(layout, &mut token); - return (id.0, None); - }; - - let id = 
fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn pipeline_layout_label(&self, id: id::PipelineLayoutId) -> String { - A::hub(self).pipeline_layouts.label_for_resource(id) - } - - pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { - profiling::scope!("PipelineLayout::drop"); - log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id); - - let hub = A::hub(self); - let mut token = Token::root(); - let (device_id, ref_count) = { - let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token); - match pipeline_layout_guard.get_mut(pipeline_layout_id) { - Ok(layout) => ( - layout.device_id.value, - layout.life_guard.ref_count.take().unwrap(), - ), - Err(InvalidId) => { - hub.pipeline_layouts - .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .pipeline_layouts - .push(Stored { - value: id::Valid(pipeline_layout_id), - ref_count, - }); - } - - pub fn device_create_bind_group( - &self, - device_id: id::DeviceId, - desc: &binding_model::BindGroupDescriptor, - id_in: Input, - ) -> (id::BindGroupId, Option) { - profiling::scope!("Device::create_bind_group"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.bind_groups.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); - - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); - } - - let bind_group_layout = match bind_group_layout_guard.get(desc.layout) { - Ok(layout) => layout, - Err(_) => break binding_model::CreateBindGroupError::InvalidLayout, - }; - let bind_group = - match device.create_bind_group(device_id, bind_group_layout, desc, hub, &mut token) - { - Ok(bind_group) => bind_group, - Err(e) => break e, - }; - let ref_count = bind_group.life_guard.add_ref(); - - let id = fid.assign(bind_group, &mut token); - log::debug!("Bind group {:?}", id,); - - device - .trackers - .lock() - .bind_groups - .insert_single(id, ref_count); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn bind_group_label(&self, id: id::BindGroupId) -> String { - A::hub(self).bind_groups.label_for_resource(id) - } - - pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { - profiling::scope!("BindGroup::drop"); - log::debug!("bind group {:?} is dropped", bind_group_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token); - match bind_group_guard.get_mut(bind_group_id) { - Ok(bind_group) => { - bind_group.life_guard.ref_count.take(); - bind_group.device_id.value - } - Err(InvalidId) => { - hub.bind_groups - .unregister_locked(bind_group_id, &mut *bind_group_guard); - return; - } - } - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .bind_groups - .push(id::Valid(bind_group_id)); - } - - pub fn device_create_shader_module( - 
&self, - device_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor, - source: pipeline::ShaderModuleSource, - id_in: Input, - ) -> ( - id::ShaderModuleId, - Option, - ) { - profiling::scope!("Device::create_shader_module"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.shader_modules.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data = match source { - #[cfg(feature = "wgsl")] - pipeline::ShaderModuleSource::Wgsl(ref code) => { - trace.make_binary("wgsl", code.as_bytes()) - } - pipeline::ShaderModuleSource::Naga(ref module) => { - let string = - ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default()) - .unwrap(); - trace.make_binary("ron", string.as_bytes()) - } - pipeline::ShaderModuleSource::Dummy(_) => { - panic!("found `ShaderModuleSource::Dummy`") - } - }; - trace.add(trace::Action::CreateShaderModule { - id: fid.id(), - desc: desc.clone(), - data, - }); - }; - - let shader = match device.create_shader_module(device_id, desc, source) { - Ok(shader) => shader, - Err(e) => break e, - }; - let id = fid.assign(shader, &mut token); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - // Unsafe-ness of internal calls has little to do with unsafe-ness of this. - #[allow(unused_unsafe)] - /// # Safety - /// - /// This function passes SPIR-V binary to the backend as-is and can potentially result in a - /// driver crash. - pub unsafe fn device_create_shader_module_spirv( - &self, - device_id: id::DeviceId, - desc: &pipeline::ShaderModuleDescriptor, - source: Cow<[u32]>, - id_in: Input, - ) -> ( - id::ShaderModuleId, - Option, - ) { - profiling::scope!("Device::create_shader_module"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.shader_modules.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data = trace.make_binary("spv", unsafe { - std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4) - }); - trace.add(trace::Action::CreateShaderModule { - id: fid.id(), - desc: desc.clone(), - data, - }); - }; - - let shader = - match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } { - Ok(shader) => shader, - Err(e) => break e, - }; - let id = fid.assign(shader, &mut token); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn shader_module_label(&self, id: id::ShaderModuleId) -> String { - A::hub(self).shader_modules.label_for_resource(id) - } - - pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { - profiling::scope!("ShaderModule::drop"); - log::debug!("shader module {:?} is dropped", shader_module_id); - - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token); - if let Some(module) = 
module { - let device = &device_guard[module.device_id.value]; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyShaderModule(shader_module_id)); - } - unsafe { - device.raw.destroy_shader_module(module.raw); - } - } - } - - pub fn device_create_command_encoder( - &self, - device_id: id::DeviceId, - desc: &wgt::CommandEncoderDescriptor(command_buffer_id) - } - - pub fn device_create_render_bundle_encoder( - &self, - device_id: id::DeviceId, - desc: &command::RenderBundleEncoderDescriptor, - ) -> ( - id::RenderBundleEncoderId, - Option, - ) { - profiling::scope!("Device::create_render_bundle_encoder"); - let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) { - Ok(encoder) => (encoder, None), - Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)), - }; - (Box::into_raw(Box::new(encoder)), error) - } - - pub fn render_bundle_encoder_finish( - &self, - bundle_encoder: command::RenderBundleEncoder, - desc: &command::RenderBundleDescriptor, - id_in: Input, - ) -> (id::RenderBundleId, Option) { - profiling::scope!("RenderBundleEncoder::finish"); - - let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.render_bundles.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(bundle_encoder.parent()) { - Ok(device) => device, - Err(_) => break command::RenderBundleError::INVALID_DEVICE, - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderBundle { - id: fid.id(), - desc: trace::new_render_bundle_encoder_descriptor( - desc.label.clone(), - &bundle_encoder.context, - bundle_encoder.is_depth_read_only, - bundle_encoder.is_stencil_read_only, - ), - base: bundle_encoder.to_base_pass(), - }); - } - - let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) { - Ok(bundle) => bundle, - Err(e) => break e, - }; - - log::debug!("Render bundle"); - let ref_count = render_bundle.life_guard.add_ref(); - let id = fid.assign(render_bundle, &mut token); - - device.trackers.lock().bundles.insert_single(id, ref_count); - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - pub fn render_bundle_label(&self, id: id::RenderBundleId) -> String { - A::hub(self).render_bundles.label_for_resource(id) - } - - pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { - profiling::scope!("RenderBundle::drop"); - log::debug!("render bundle {:?} is dropped", render_bundle_id); - let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device_id = { - let (mut bundle_guard, _) = hub.render_bundles.write(&mut token); - match bundle_guard.get_mut(render_bundle_id) { - Ok(bundle) => { - bundle.life_guard.ref_count.take(); - bundle.device_id.value - } - Err(InvalidId) => { - hub.render_bundles - .unregister_locked(render_bundle_id, &mut *bundle_guard); - return; - } - } - }; - - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .render_bundles - .push(id::Valid(render_bundle_id)); - } - - pub fn device_create_query_set( - &self, - device_id: id::DeviceId, - desc: &resource::QuerySetDescriptor, - id_in: Input, - ) -> (id::QuerySetId, Option) { - profiling::scope!("Device::create_query_set"); - - let hub = A::hub(self); - let mut token = 
Token::root(); - let fid = hub.query_sets.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateQuerySet { - id: fid.id(), - desc: desc.clone(), - }); - } - - let query_set = match device.create_query_set(device_id, desc) { - Ok(query_set) => query_set, - Err(err) => break err, - }; - - let ref_count = query_set.life_guard.add_ref(); - let id = fid.assign(query_set, &mut token); - - device - .trackers - .lock() - .query_sets - .insert_single(id, ref_count); - - return (id.0, None); - }; - - let id = fid.assign_error("", &mut token); - (id, Some(error)) - } - - pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { - profiling::scope!("QuerySet::drop"); - log::debug!("query set {:?} is dropped", query_set_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut query_set_guard, _) = hub.query_sets.write(&mut token); - let query_set = query_set_guard.get_mut(query_set_id).unwrap(); - query_set.life_guard.ref_count.take(); - query_set.device_id.value - }; - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyQuerySet(query_set_id)); - } - - device - .lock_life(&mut token) - .suspected_resources - .query_sets - .push(id::Valid(query_set_id)); - } - - pub fn query_set_label(&self, id: id::QuerySetId) -> String { - A::hub(self).query_sets.label_for_resource(id) - } - - pub fn device_create_render_pipeline( - &self, - device_id: id::DeviceId, - desc: &pipeline::RenderPipelineDescriptor, - id_in: Input, - implicit_pipeline_ids: Option>, - ) -> ( - id::RenderPipelineId, - Option, - ) { - profiling::scope!("Device::create_render_pipeline"); - - let hub = A::hub(self); - let mut token = Token::root(); - - let fid = hub.render_pipelines.prepare(id_in); - let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); - - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - let adapter = &adapter_guard[device.adapter_id.value]; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderPipeline { - id: fid.id(), - desc: desc.clone(), - implicit_context: implicit_context.clone(), - }); - } - - let pipeline = match device.create_render_pipeline( - device_id, - adapter, - desc, - implicit_context, - hub, - &mut token, - ) { - Ok(pair) => pair, - Err(e) => break e, - }; - let ref_count = pipeline.life_guard.add_ref(); - - let id = fid.assign(pipeline, &mut token); - log::info!("Created render pipeline {:?} with {:?}", id, desc); - - device - .trackers - .lock() - .render_pipelines - .insert_single(id, ref_count); - - return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - /// Get an ID of one of the bind group layouts. The ID adds a refcount, - /// which needs to be released by calling `bind_group_layout_drop`. 
- pub fn render_pipeline_get_bind_group_layout( - &self, - pipeline_id: id::RenderPipelineId, - index: u32, - id_in: Input, - ) -> ( - id::BindGroupLayoutId, - Option, - ) { - let hub = A::hub(self); - let mut token = Token::root(); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - - let error = loop { - let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (_, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, _) = hub.render_pipelines.read(&mut token); - - let pipeline = match pipeline_guard.get(pipeline_id) { - Ok(pipeline) => pipeline, - Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, - }; - let id = match pipeline_layout_guard[pipeline.layout_id.value] - .bind_group_layout_ids - .get(index as usize) - { - Some(id) => id, - None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), - }; - - bgl_guard[*id].multi_ref_count.inc(); - return (id.0, None); - }; - - let id = hub - .bind_group_layouts - .prepare(id_in) - .assign_error("", &mut token); - (id, Some(error)) - } - - pub fn render_pipeline_label(&self, id: id::RenderPipelineId) -> String { - A::hub(self).render_pipelines.label_for_resource(id) - } - - pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { - profiling::scope!("RenderPipeline::drop"); - log::debug!("render pipeline {:?} is dropped", render_pipeline_id); - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (device_id, layout_id) = { - let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token); - match pipeline_guard.get_mut(render_pipeline_id) { - Ok(pipeline) => { - pipeline.life_guard.ref_count.take(); - (pipeline.device_id.value, pipeline.layout_id.clone()) - } - Err(InvalidId) => { - hub.render_pipelines - .unregister_locked(render_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - - let mut life_lock = device_guard[device_id].lock_life(&mut token); - life_lock - .suspected_resources - .render_pipelines - .push(id::Valid(render_pipeline_id)); - life_lock - .suspected_resources - .pipeline_layouts - .push(layout_id); - } - - pub fn device_create_compute_pipeline( - &self, - device_id: id::DeviceId, - desc: &pipeline::ComputePipelineDescriptor, - id_in: Input, - implicit_pipeline_ids: Option>, - ) -> ( - id::ComputePipelineId, - Option, - ) { - profiling::scope!("Device::create_compute_pipeline"); - - let hub = A::hub(self); - let mut token = Token::root(); - - let fid = hub.compute_pipelines.prepare(id_in); - let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let error = loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateComputePipeline { - id: fid.id(), - desc: desc.clone(), - implicit_context: implicit_context.clone(), - }); - } - - let pipeline = match device.create_compute_pipeline( - device_id, - desc, - implicit_context, - hub, - &mut token, - ) { - Ok(pair) => pair, - Err(e) => break e, - }; - let ref_count = pipeline.life_guard.add_ref(); - - let id = fid.assign(pipeline, &mut token); - log::info!("Created compute pipeline {:?} with {:?}", id, desc); - - device - .trackers - .lock() - .compute_pipelines - .insert_single(id, ref_count); - 
return (id.0, None); - }; - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) - } - - /// Get an ID of one of the bind group layouts. The ID adds a refcount, - /// which needs to be released by calling `bind_group_layout_drop`. - pub fn compute_pipeline_get_bind_group_layout( - &self, - pipeline_id: id::ComputePipelineId, - index: u32, - id_in: Input, - ) -> ( - id::BindGroupLayoutId, - Option, - ) { - let hub = A::hub(self); - let mut token = Token::root(); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - - let error = loop { - let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (_, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token); - - let pipeline = match pipeline_guard.get(pipeline_id) { - Ok(pipeline) => pipeline, - Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, - }; - let id = match pipeline_layout_guard[pipeline.layout_id.value] - .bind_group_layout_ids - .get(index as usize) - { - Some(id) => id, - None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), - }; - - bgl_guard[*id].multi_ref_count.inc(); - return (id.0, None); - }; - - let id = hub - .bind_group_layouts - .prepare(id_in) - .assign_error("", &mut token); - (id, Some(error)) - } - - pub fn compute_pipeline_label(&self, id: id::ComputePipelineId) -> String { - A::hub(self).compute_pipelines.label_for_resource(id) - } - - pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { - profiling::scope!("ComputePipeline::drop"); - log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id); - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (device_id, layout_id) = { - let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token); - match pipeline_guard.get_mut(compute_pipeline_id) { - Ok(pipeline) => { - pipeline.life_guard.ref_count.take(); - (pipeline.device_id.value, pipeline.layout_id.clone()) - } - Err(InvalidId) => { - hub.compute_pipelines - .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - - let mut life_lock = device_guard[device_id].lock_life(&mut token); - life_lock - .suspected_resources - .compute_pipelines - .push(id::Valid(compute_pipeline_id)); - life_lock - .suspected_resources - .pipeline_layouts - .push(layout_id); - } - - pub fn surface_configure( - &self, - surface_id: id::SurfaceId, - device_id: id::DeviceId, - config: &wgt::SurfaceConfiguration>, - ) -> Option { - use hal::{Adapter as _, Surface as _}; - use present::ConfigureSurfaceError as E; - profiling::scope!("surface_configure"); - - fn validate_surface_configuration( - config: &mut hal::SurfaceConfiguration, - caps: &hal::SurfaceCapabilities, - ) -> Result<(), E> { - let width = config.extent.width; - let height = config.extent.height; - if width < caps.extents.start().width - || width > caps.extents.end().width - || height < caps.extents.start().height - || height > caps.extents.end().height - { - log::warn!( - "Requested size {}x{} is outside of the supported range: {:?}", - width, - height, - caps.extents - ); - } - if !caps.present_modes.contains(&config.present_mode) { - let new_mode = 'b: loop { - // Automatic present mode checks. - // - // The "Automatic" modes are never supported by the backends. 
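// A minimal, self-contained sketch of the fallback rule described in the comment
// above: walk the candidate list in order and pick the first mode the surface
// actually supports. The strings and data below are illustrative stand-ins, not
// wgpu-core code; the real candidate lists are in the `match` on
// `config.present_mode` that follows.
fn resolve_mode<'a>(fallbacks: &[&'a str], supported: &[&'a str]) -> Option<&'a str> {
    // First candidate that the surface reports as supported wins.
    fallbacks.iter().copied().find(|m| supported.contains(m))
}

fn fallback_example() {
    // AutoVsync tries FifoRelaxed first, then Fifo; Fifo is listed last because
    // a conforming backend always supports it, so the search cannot come up empty.
    let chosen = resolve_mode(&["FifoRelaxed", "Fifo"], &["Immediate", "Fifo"]);
    assert_eq!(chosen, Some("Fifo"));
}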
- let fallbacks = match config.present_mode { - wgt::PresentMode::AutoVsync => { - &[wgt::PresentMode::FifoRelaxed, wgt::PresentMode::Fifo][..] - } - // Always end in FIFO to make sure it's always supported - wgt::PresentMode::AutoNoVsync => &[ - wgt::PresentMode::Immediate, - wgt::PresentMode::Mailbox, - wgt::PresentMode::Fifo, - ][..], - _ => { - return Err(E::UnsupportedPresentMode { - requested: config.present_mode, - available: caps.present_modes.clone(), - }); - } - }; - - for &fallback in fallbacks { - if caps.present_modes.contains(&fallback) { - break 'b fallback; - } - } - - unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes); - }; - - log::info!( - "Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}", - config.present_mode - ); - config.present_mode = new_mode; - } - if !caps.formats.contains(&config.format) { - return Err(E::UnsupportedFormat { - requested: config.format, - available: caps.formats.clone(), - }); - } - if !caps - .composite_alpha_modes - .contains(&config.composite_alpha_mode) - { - let new_alpha_mode = 'alpha: loop { - // Automatic alpha mode checks. - let fallbacks = match config.composite_alpha_mode { - wgt::CompositeAlphaMode::Auto => &[ - wgt::CompositeAlphaMode::Opaque, - wgt::CompositeAlphaMode::Inherit, - ][..], - _ => { - return Err(E::UnsupportedAlphaMode { - requested: config.composite_alpha_mode, - available: caps.composite_alpha_modes.clone(), - }); - } - }; - - for &fallback in fallbacks { - if caps.composite_alpha_modes.contains(&fallback) { - break 'alpha fallback; - } - } - - unreachable!( - "Fallback system failed to choose alpha mode. This is a bug. \ - AlphaMode: {:?}, Options: {:?}", - config.composite_alpha_mode, &caps.composite_alpha_modes - ); - }; - - log::info!( - "Automatically choosing alpha mode by rule {:?}. 
Chose {new_alpha_mode:?}", - config.composite_alpha_mode - ); - config.composite_alpha_mode = new_alpha_mode; - } - if !caps.usage.contains(config.usage) { - return Err(E::UnsupportedUsage); - } - if width == 0 || height == 0 { - return Err(E::ZeroArea); - } - Ok(()) - } - - log::info!("configuring surface with {:?}", config); - let hub = A::hub(self); - let mut token = Token::root(); - - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, _token) = hub.devices.read(&mut token); - - let error = 'outer: loop { - let device = match device_guard.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::ConfigureSurface(surface_id, config.clone())); - } - - let surface = match surface_guard.get_mut(surface_id) { - Ok(surface) => surface, - Err(_) => break E::InvalidSurface, - }; - - let caps = unsafe { - let suf = A::get_surface(surface); - let adapter = &adapter_guard[device.adapter_id.value]; - match adapter.raw.adapter.surface_capabilities(&suf.unwrap().raw) { - Some(caps) => caps, - None => break E::UnsupportedQueueFamily, - } - }; - - let mut hal_view_formats = vec![]; - for format in config.view_formats.iter() { - if *format == config.format { - continue; - } - if !caps.formats.contains(&config.format) { - break 'outer E::UnsupportedFormat { - requested: config.format, - available: caps.formats.clone(), - }; - } - if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() { - break 'outer E::InvalidViewFormat(*format, config.format); - } - hal_view_formats.push(*format); - } - - if !hal_view_formats.is_empty() { - if let Err(missing_flag) = - device.require_downlevel_flags(wgt::DownlevelFlags::SURFACE_VIEW_FORMATS) - { - break 'outer E::MissingDownlevelFlags(missing_flag); - } - } - - let num_frames = present::DESIRED_NUM_FRAMES - .clamp(*caps.swap_chain_sizes.start(), *caps.swap_chain_sizes.end()); - let mut hal_config = hal::SurfaceConfiguration { - swap_chain_size: num_frames, - present_mode: config.present_mode, - composite_alpha_mode: config.alpha_mode, - format: config.format, - extent: wgt::Extent3d { - width: config.width, - height: config.height, - depth_or_array_layers: 1, - }, - usage: conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR), - view_formats: hal_view_formats, - }; - - if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) { - break error; - } - - match unsafe { - A::get_surface_mut(surface) - .unwrap() - .raw - .configure(&device.raw, &hal_config) - } { - Ok(()) => (), - Err(error) => { - break match error { - hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface, - hal::SurfaceError::Device(error) => E::Device(error.into()), - hal::SurfaceError::Other(message) => { - log::error!("surface configuration failed: {}", message); - E::InvalidSurface - } - } - } - } - - if let Some(present) = surface.presentation.take() { - if present.acquired_texture.is_some() { - break E::PreviousOutputExists; - } - } - - surface.presentation = Some(present::Presentation { - device_id: Stored { - value: id::Valid(device_id), - ref_count: device.life_guard.add_ref(), - }, - config: config.clone(), - num_frames, - acquired_texture: None, - }); - - return None; - }; - - Some(error) - } - - #[cfg(feature = "replay")] - /// Only triange suspected resource IDs. 
This helps us to avoid ID collisions - /// upon creating new resources when re-playing a trace. - pub fn device_maintain_ids( - &self, - device_id: id::DeviceId, - ) -> Result<(), InvalidDevice> { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - device.lock_life(&mut token).triage_suspected( - hub, - &device.trackers, - #[cfg(feature = "trace")] - None, - &mut token, - ); - Ok(()) - } - - /// Check `device_id` for freeable resources and completed buffer mappings. - /// - /// Return `queue_empty` indicating whether there are more queue submissions still in flight. - pub fn device_poll( - &self, - device_id: id::DeviceId, - maintain: wgt::Maintain, - ) -> Result { - let (closures, queue_empty) = { - if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain { - if submission_index.queue_id != device_id { - return Err(WaitIdleError::WrongSubmissionIndex( - submission_index.queue_id, - device_id, - )); - } - } - - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard - .get(device_id) - .map_err(|_| DeviceError::Invalid)? - .maintain(hub, maintain, &mut token)? - }; - - closures.fire(); - - Ok(queue_empty) - } - - /// Poll all devices belonging to the backend `A`. - /// - /// If `force_wait` is true, block until all buffer mappings are done. - /// - /// Return `all_queue_empty` indicating whether there are more queue - /// submissions still in flight. - fn poll_devices( - &self, - force_wait: bool, - closures: &mut UserClosures, - ) -> Result { - profiling::scope!("poll_devices"); - - let hub = A::hub(self); - let mut devices_to_drop = vec![]; - let mut all_queue_empty = true; - { - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - for (id, device) in device_guard.iter(A::VARIANT) { - let maintain = if force_wait { - wgt::Maintain::Wait - } else { - wgt::Maintain::Poll - }; - let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?; - all_queue_empty = all_queue_empty && queue_empty; - - // If the device's own `RefCount` clone is the only one left, and - // its submission queue is empty, then it can be freed. - if queue_empty && device.ref_count.load() == 1 { - devices_to_drop.push(id); - } - closures.extend(cbs); - } - } - - for device_id in devices_to_drop { - self.exit_device::(device_id); - } - - Ok(all_queue_empty) - } - - /// Poll all devices on all backends. - /// - /// This is the implementation of `wgpu::Instance::poll_all`. - /// - /// Return `all_queue_empty` indicating whether there are more queue - /// submissions still in flight. - pub fn poll_all_devices(&self, force_wait: bool) -> Result { - let mut closures = UserClosures::default(); - let mut all_queue_empty = true; - - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - { - all_queue_empty = self.poll_devices::(force_wait, &mut closures)? - && all_queue_empty; - } - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - { - all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; - } - #[cfg(all(feature = "dx12", windows))] - { - all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; - } - #[cfg(all(feature = "dx11", windows))] - { - all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? 
&& all_queue_empty; - } - #[cfg(feature = "gles")] - { - all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; - } - - closures.fire(); - - Ok(all_queue_empty) - } - - pub fn device_label(&self, id: id::DeviceId) -> String { - A::hub(self).devices.label_for_resource(id) - } - - pub fn device_start_capture(&self, id: id::DeviceId) { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - if let Ok(device) = device_guard.get(id) { - unsafe { device.raw.start_capture() }; - } - } - - pub fn device_stop_capture(&self, id: id::DeviceId) { - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - if let Ok(device) = device_guard.get(id) { - unsafe { device.raw.stop_capture() }; - } - } - - pub fn device_drop(&self, device_id: id::DeviceId) { - profiling::scope!("Device::drop"); - log::debug!("device {:?} is dropped", device_id); - - let hub = A::hub(self); - let mut token = Token::root(); - - // For now, just drop the `RefCount` in `device.life_guard`, which - // stands for the user's reference to the device. We'll take care of - // cleaning up the device when we're polled, once its queue submissions - // have completed and it is no longer needed by other resources. - let (mut device_guard, _) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { - device.life_guard.ref_count.take().unwrap(); - } - } - - /// Exit the unreferenced, inactive device `device_id`. - fn exit_device(&self, device_id: id::DeviceId) { - let hub = A::hub(self); - let mut token = Token::root(); - let mut free_adapter_id = None; - { - let (device, mut _token) = hub.devices.unregister(device_id, &mut token); - if let Some(mut device) = device { - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. - debug_assert!(device.lock_life(&mut _token).queue_empty()); - device.pending_writes.deactivate(); - - // Adapter is only referenced by the device and itself. - // This isn't a robust way to destroy them, we should find a better one. - if device.adapter_id.ref_count.load() == 1 { - free_adapter_id = Some(device.adapter_id.value.0); - } - - device.dispose(); - } - } - - // Free the adapter now that we've dropped the `Device` token. - if let Some(free_adapter_id) = free_adapter_id { - let _ = hub.adapters.unregister(free_adapter_id, &mut token); - } - } - - pub fn buffer_map_async( - &self, - buffer_id: id::BufferId, - range: Range, - op: BufferMapOperation, - ) -> BufferAccessResult { - // User callbacks must not be called while holding buffer_map_async_inner's locks, so we - // defer the error callback if it needs to be called immediately (typically when running - // into errors). - if let Err((op, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { - op.callback.call(Err(err.clone())); - - return Err(err); - } - - Ok(()) - } - - // Returns the mapping callback in case of error so that the callback can be fired outside - // of the locks that are held in this function. 
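// A minimal, self-contained sketch of the pattern described in the comment above:
// the inner function returns the user callback together with the error instead of
// invoking it while its locks are held, and the outer function fires it only after
// the locks are released. All types below are illustrative stand-ins, not
// wgpu-core's real `BufferMapOperation` machinery.
use std::sync::Mutex;

struct MapCallback(Box<dyn FnOnce(Result<(), String>) + Send>);

struct MapState {
    lock: Mutex<()>,
}

impl MapState {
    fn map_inner(&self, cb: MapCallback) -> Result<(), (MapCallback, String)> {
        let _guard = self.lock.lock().unwrap();
        // Validation failed while the lock is held: hand the callback back to the
        // caller instead of running user code here.
        Err((cb, "invalid buffer".to_string()))
    }

    fn map_async(&self, cb: MapCallback) -> Result<(), String> {
        if let Err((cb, err)) = self.map_inner(cb) {
            // The guard from `map_inner` has been dropped, so it is now safe to
            // call into user code.
            (cb.0)(Err(err.clone()));
            return Err(err);
        }
        Ok(())
    }
}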
- fn buffer_map_async_inner( - &self, - buffer_id: id::BufferId, - range: Range, - op: BufferMapOperation, - ) -> Result<(), (BufferMapOperation, BufferAccessError)> { - profiling::scope!("Buffer::map_async"); - - let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (pub_usage, internal_use) = match op.host { - HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ), - HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE), - }; - - if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 { - return Err((op, BufferAccessError::UnalignedRange)); - } - - let (device_id, ref_count) = { - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| BufferAccessError::Invalid); - - let buffer = match buffer { - Ok(b) => b, - Err(e) => { - return Err((op, e)); - } - }; - - if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) { - return Err((op, e.into())); - } - - if range.start > range.end { - return Err(( - op, - BufferAccessError::NegativeRange { - start: range.start, - end: range.end, - }, - )); - } - if range.end > buffer.size { - return Err(( - op, - BufferAccessError::OutOfBoundsOverrun { - index: range.end, - max: buffer.size, - }, - )); - } - - buffer.map_state = match buffer.map_state { - resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => { - return Err((op, BufferAccessError::AlreadyMapped)); - } - resource::BufferMapState::Waiting(_) => { - return Err((op, BufferAccessError::MapAlreadyPending)); - } - resource::BufferMapState::Idle => { - resource::BufferMapState::Waiting(resource::BufferPendingMapping { - range, - op, - _parent_ref_count: buffer.life_guard.add_ref(), - }) - } - }; - log::debug!("Buffer {:?} map state -> Waiting", buffer_id); - - let device = &device_guard[buffer.device_id.value]; - - let ret = (buffer.device_id.value, buffer.life_guard.add_ref()); - - let mut trackers = device.trackers.lock(); - trackers - .buffers - .set_single(&*buffer_guard, buffer_id, internal_use); - trackers.buffers.drain(); - - ret - }; - - let device = &device_guard[device_id]; - - device - .lock_life(&mut token) - .map(id::Valid(buffer_id), ref_count); - - Ok(()) - } - - pub fn buffer_get_mapped_range( - &self, - buffer_id: id::BufferId, - offset: BufferAddress, - size: Option, - ) -> Result<(*mut u8, u64), BufferAccessError> { - profiling::scope!("Buffer::get_mapped_range"); - - let hub = A::hub(self); - let mut token = Token::root(); - let (buffer_guard, _) = hub.buffers.read(&mut token); - let buffer = buffer_guard - .get(buffer_id) - .map_err(|_| BufferAccessError::Invalid)?; - - let range_size = if let Some(size) = size { - size - } else if offset > buffer.size { - 0 - } else { - buffer.size - offset - }; - - if offset % wgt::MAP_ALIGNMENT != 0 { - return Err(BufferAccessError::UnalignedOffset { offset }); - } - if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 { - return Err(BufferAccessError::UnalignedRangeSize { range_size }); - } - - match buffer.map_state { - resource::BufferMapState::Init { ptr, .. 
} => { - // offset (u64) can not be < 0, so no need to validate the lower bound - if offset + range_size > buffer.size { - return Err(BufferAccessError::OutOfBoundsOverrun { - index: offset + range_size - 1, - max: buffer.size, - }); - } - unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) } - } - resource::BufferMapState::Active { ptr, ref range, .. } => { - if offset < range.start { - return Err(BufferAccessError::OutOfBoundsUnderrun { - index: offset, - min: range.start, - }); - } - if offset + range_size > range.end { - return Err(BufferAccessError::OutOfBoundsOverrun { - index: offset + range_size - 1, - max: range.end, - }); - } - // ptr points to the beginning of the range we mapped in map_async - // rather thant the beginning of the buffer. - let relative_offset = (offset - range.start) as isize; - unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) } - } - resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => { - Err(BufferAccessError::NotMapped) - } - } - } - - fn buffer_unmap_inner( - &self, - buffer_id: id::BufferId, - buffer: &mut resource::Buffer, - device: &mut Device, - ) -> Result, BufferAccessError> { - log::debug!("Buffer {:?} map state -> Idle", buffer_id); - match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) { - resource::BufferMapState::Init { - ptr, - stage_buffer, - needs_flush, - } => { - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: 0..buffer.size, - queued: true, - }); - } - let _ = ptr; - if needs_flush { - unsafe { - device - .raw - .flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size)); - } - } - - let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; - - buffer.life_guard.use_at(device.active_submission_index + 1); - let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { - src_offset: 0, - dst_offset: 0, - size, - }); - let transition_src = hal::BufferBarrier { - buffer: &stage_buffer, - usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, - }; - let transition_dst = hal::BufferBarrier { - buffer: raw_buf, - usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, - }; - let encoder = device.pending_writes.activate(); - unsafe { - encoder.transition_buffers( - iter::once(transition_src).chain(iter::once(transition_dst)), - ); - if buffer.size > 0 { - encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter()); - } - } - device - .pending_writes - .consume_temp(queue::TempResource::Buffer(stage_buffer)); - device.pending_writes.dst_buffers.insert(buffer_id); - } - resource::BufferMapState::Idle => { - return Err(BufferAccessError::NotMapped); - } - resource::BufferMapState::Waiting(pending) => { - return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); - } - resource::BufferMapState::Active { ptr, range, host } => { - if host == HostMap::Write { - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let size = range.end - range.start; - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: range.clone(), - queued: false, - }); - } - let _ = (ptr, range); - } - unsafe { - device - .raw - 
.unmap_buffer(buffer.raw.as_ref().unwrap()) - .map_err(DeviceError::from)? - }; - } - } - Ok(None) - } - - pub fn buffer_unmap(&self, buffer_id: id::BufferId) -> BufferAccessResult { - profiling::scope!("unmap", "Buffer"); - - let closure; - { - // Restrict the locks to this scope. - let hub = A::hub(self); - let mut token = Token::root(); - - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| BufferAccessError::Invalid)?; - let device = &mut device_guard[buffer.device_id.value]; - - closure = self.buffer_unmap_inner(buffer_id, buffer, device) - } - - // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = closure? { - operation.callback.call(status); - } - Ok(()) } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 0f9ec753cf..ec11cf09a9 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -8,17 +8,24 @@ use crate::{ conv, device::{DeviceError, WaitIdleError}, get_lowest_common_denom, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token}, + global::Global, + hal_api::HalApi, id, + identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, - resource::{BufferAccessError, BufferMapState, StagingBuffer, TextureInner}, - track, FastHashSet, SubmissionIndex, + resource::{ + Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, StagingBuffer, Texture, + TextureInner, TextureView, + }, + track, FastHashMap, SubmissionIndex, }; use hal::{CommandEncoder as _, Device as _, Queue as _}; -use parking_lot::Mutex; use smallvec::SmallVec; -use std::{iter, mem, ptr}; +use std::{ + iter, mem, ptr, + sync::{atomic::Ordering, Arc}, +}; use thiserror::Error; /// Number of command buffers that we generate from the same pool @@ -103,19 +110,20 @@ pub struct WrappedSubmissionIndex { /// - `LifetimeTracker::free_resources`: resources to be freed in the next /// `maintain` call, no longer used anywhere #[derive(Debug)] -pub enum TempResource { - Buffer(A::Buffer), - Texture(A::Texture, SmallVec<[A::TextureView; 1]>), +pub enum TempResource { + Buffer(Arc>), + StagingBuffer(Arc>), + Texture(Arc>, SmallVec<[Arc>; 1]>), } /// A queue execution for a particular command encoder. -pub(super) struct EncoderInFlight { +pub(crate) struct EncoderInFlight { raw: A::CommandEncoder, cmd_buffers: Vec, } -impl EncoderInFlight { - pub(super) unsafe fn land(mut self) -> A::CommandEncoder { +impl EncoderInFlight { + pub(crate) unsafe fn land(mut self) -> A::CommandEncoder { unsafe { self.raw.reset_all(self.cmd_buffers.into_iter()) }; self.raw } @@ -138,23 +146,23 @@ impl EncoderInFlight { /// /// All uses of [`StagingBuffer`]s end up here. 
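// A simplified sketch of the role described in the doc comment above: pending
// writes park every temporary (staging) resource until the submission that
// recorded them has been handed to the queue, and only then release them for
// eventual destruction. Stand-in types only; not the real wgpu-core structs.
struct TempBuffer;

#[derive(Default)]
struct PendingWritesSketch {
    temp_resources: Vec<TempBuffer>,
}

impl PendingWritesSketch {
    // Every staging buffer used by a queue write ends up here.
    fn consume(&mut self, buf: TempBuffer) {
        self.temp_resources.push(buf);
    }

    // Once the recorded writes have been submitted, the parked buffers can be
    // scheduled for destruction (in wgpu-core this goes through the lifetime
    // tracker rather than being freed immediately).
    fn drain_after_submit(&mut self) -> Vec<TempBuffer> {
        std::mem::take(&mut self.temp_resources)
    }
}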
#[derive(Debug)] -pub(crate) struct PendingWrites { +pub(crate) struct PendingWrites { pub command_encoder: A::CommandEncoder, pub is_active: bool, pub temp_resources: Vec>, - pub dst_buffers: FastHashSet, - pub dst_textures: FastHashSet, + pub dst_buffers: FastHashMap>>, + pub dst_textures: FastHashMap>>, pub executing_command_buffers: Vec, } -impl PendingWrites { +impl PendingWrites { pub fn new(command_encoder: A::CommandEncoder) -> Self { Self { command_encoder, is_active: false, temp_resources: Vec::new(), - dst_buffers: FastHashSet::default(), - dst_textures: FastHashSet::default(), + dst_buffers: FastHashMap::default(), + dst_textures: FastHashMap::default(), executing_command_buffers: Vec::new(), } } @@ -169,27 +177,16 @@ impl PendingWrites { device.destroy_command_encoder(self.command_encoder); } - for resource in self.temp_resources { - match resource { - TempResource::Buffer(buffer) => unsafe { - device.destroy_buffer(buffer); - }, - TempResource::Texture(texture, views) => unsafe { - for view in views.into_iter() { - device.destroy_texture_view(view); - } - device.destroy_texture(texture); - }, - } - } + self.temp_resources.clear(); } pub fn consume_temp(&mut self, resource: TempResource) { self.temp_resources.push(resource); } - fn consume(&mut self, buffer: StagingBuffer) { - self.temp_resources.push(TempResource::Buffer(buffer.raw)); + fn consume(&mut self, buffer: Arc>) { + self.temp_resources + .push(TempResource::StagingBuffer(buffer)); } #[must_use] @@ -209,15 +206,12 @@ impl PendingWrites { #[must_use] fn post_submit( &mut self, - command_allocator: &Mutex>, + command_allocator: &mut super::CommandAllocator, device: &A::Device, queue: &A::Queue, ) -> Option> { if self.executing_command_buffers.len() >= WRITE_COMMAND_BUFFERS_PER_POOL { - let new_encoder = command_allocator - .lock() - .acquire_encoder(device, queue) - .unwrap(); + let new_encoder = command_allocator.acquire_encoder(device, queue).unwrap(); Some(EncoderInFlight { raw: mem::replace(&mut self.command_encoder, new_encoder), cmd_buffers: mem::take(&mut self.executing_command_buffers), @@ -250,7 +244,7 @@ impl PendingWrites { } fn prepare_staging_buffer( - device: &mut A::Device, + device: &A::Device, size: wgt::BufferAddress, ) -> Result<(StagingBuffer, *mut u8), DeviceError> { profiling::scope!("prepare_staging_buffer"); @@ -265,15 +259,16 @@ fn prepare_staging_buffer( let mapping = unsafe { device.map_buffer(&buffer, 0..size) }?; let staging_buffer = StagingBuffer { - raw: buffer, + raw: Arc::new(buffer), size, + info: ResourceInfo::new(""), is_coherent: mapping.is_coherent, }; Ok((staging_buffer, mapping.ptr.as_ptr())) } -impl StagingBuffer { +impl StagingBuffer { unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> { if !self.is_coherent { unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)) }; @@ -330,11 +325,10 @@ impl Global { profiling::scope!("Queue::write_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - let (mut device_guard, ref mut device_token) = hub.devices.write(root_token); - let device = device_guard - .get_mut(queue_id) + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; let data_size = data.len() as wgt::BufferAddress; @@ -360,26 +354,35 @@ impl Global { // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
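// A self-contained sketch of the invariant stated in the comment above: once the
// staging buffer exists, every path out of the function, including error paths,
// must hand it to `consume`, so it is freed exactly once after the submission.
// All names below are illustrative stand-ins for the calls in this file.
struct Staging;

#[derive(Debug)]
struct WriteError;

fn prepare(_size: u64) -> Result<Staging, WriteError> { Ok(Staging) }
fn flush(_s: &Staging) -> Result<(), WriteError> { Ok(()) }
fn record_copy(_s: &Staging) -> Result<(), WriteError> { Ok(()) }
fn consume(_s: Staging) { /* parked until the submission completes */ }

fn write_buffer_sketch(size: u64) -> Result<(), WriteError> {
    let staging = prepare(size)?; // nothing to free yet if this fails
    if let Err(e) = flush(&staging) {
        consume(staging); // error path still consumes
        return Err(e);
    }
    let result = record_copy(&staging);
    consume(staging); // success path consumes as well
    result
}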
let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, data_size)?; + prepare_staging_buffer(device.raw.as_ref().unwrap(), data_size)?; if let Err(flush_error) = unsafe { profiling::scope!("copy"); ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); - staging_buffer.flush(&device.raw) + staging_buffer.flush(device.raw.as_ref().unwrap()) } { - device.pending_writes.consume(staging_buffer); + device + .pending_writes + .lock() + .as_mut() + .unwrap() + .consume(Arc::new(staging_buffer)); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - device, - device_token, + &device, &staging_buffer, buffer_id, buffer_offset, ); - device.pending_writes.consume(staging_buffer); + device + .pending_writes + .lock() + .as_mut() + .unwrap() + .consume(Arc::new(staging_buffer)); result } @@ -391,18 +394,17 @@ impl Global { ) -> Result<(id::StagingBufferId, *mut u8), QueueWriteError> { profiling::scope!("Queue::create_staging_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - let (mut device_guard, ref mut device_token) = hub.devices.write(root_token); - let device = device_guard - .get_mut(queue_id) + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, buffer_size.get())?; + prepare_staging_buffer(device.raw.as_ref().unwrap(), buffer_size.get())?; let fid = hub.staging_buffers.prepare(id_in); - let id = fid.assign(staging_buffer, device_token); + let (id, _) = fid.assign(staging_buffer); Ok((id.0, staging_buffer_ptr)) } @@ -416,37 +418,47 @@ impl Global { ) -> Result<(), QueueWriteError> { profiling::scope!("Queue::write_staging_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - let (mut device_guard, ref mut device_token) = hub.devices.write(root_token); - let device = device_guard - .get_mut(queue_id) + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; - let staging_buffer = hub - .staging_buffers - .unregister(staging_buffer_id, device_token) - .0 - .ok_or(TransferError::InvalidBuffer(buffer_id))?; + let staging_buffer = hub.staging_buffers.unregister(staging_buffer_id); + if staging_buffer.is_none() { + return Err(QueueWriteError::Transfer(TransferError::InvalidBuffer( + buffer_id, + ))); + } + let staging_buffer = staging_buffer.unwrap(); // At this point, we have taken ownership of the staging_buffer from the // user. Platform validation requires that the staging buffer always // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
- if let Err(flush_error) = unsafe { staging_buffer.flush(&device.raw) } { - device.pending_writes.consume(staging_buffer); + if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { + device + .pending_writes + .lock() + .as_mut() + .unwrap() + .consume(staging_buffer); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - device, - device_token, + &device, &staging_buffer, buffer_id, buffer_offset, ); - device.pending_writes.consume(staging_buffer); + device + .pending_writes + .lock() + .as_mut() + .unwrap() + .consume(staging_buffer); result } @@ -459,23 +471,20 @@ impl Global { ) -> Result<(), QueueWriteError> { profiling::scope!("Queue::validate_write_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - - let (_, ref mut device_token) = hub.devices.read(root_token); - let buffer_guard = hub.buffers.read(device_token).0; - let buffer = buffer_guard + let buffer = hub + .buffers .get(buffer_id) .map_err(|_| TransferError::InvalidBuffer(buffer_id))?; - self.queue_validate_write_buffer_impl(buffer, buffer_id, buffer_offset, buffer_size)?; + self.queue_validate_write_buffer_impl(&buffer, buffer_id, buffer_offset, buffer_size)?; Ok(()) } fn queue_validate_write_buffer_impl( &self, - buffer: &super::resource::Buffer, + buffer: &Buffer, buffer_id: id::BufferId, buffer_offset: u64, buffer_size: u64, @@ -506,15 +515,14 @@ impl Global { fn queue_write_staging_buffer_impl( &self, - device: &mut super::Device, - device_token: &mut Token>, + device: &super::Device, staging_buffer: &StagingBuffer, buffer_id: id::BufferId, buffer_offset: u64, ) -> Result<(), QueueWriteError> { let hub = A::hub(self); - let buffer_guard = hub.buffers.read(device_token).0; + let buffer_guard = hub.buffers.read(); let mut trackers = device.trackers.lock(); let (dst, transition) = trackers @@ -529,7 +537,11 @@ impl Global { let src_buffer_size = staging_buffer.size; self.queue_validate_write_buffer_impl(dst, buffer_id, buffer_offset, src_buffer_size)?; - dst.life_guard.use_at(device.active_submission_index + 1); + dst.info.use_at( + device + .active_submission_index + .fetch_add(1, Ordering::Relaxed), + ); let region = wgt::BufferSize::new(src_buffer_size).map(|size| hal::BufferCopy { src_offset: 0, @@ -537,26 +549,30 @@ impl Global { size, }); let barriers = iter::once(hal::BufferBarrier { - buffer: &staging_buffer.raw, + buffer: staging_buffer.raw.as_ref(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }) .chain(transition.map(|pending| pending.into_hal(dst))); - let encoder = device.pending_writes.activate(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); unsafe { encoder.transition_buffers(barriers); encoder.copy_buffer_to_buffer(&staging_buffer.raw, dst_raw, region.into_iter()); } - device.pending_writes.dst_buffers.insert(buffer_id); + pending_writes + .dst_buffers + .insert(buffer_id, buffer_guard.get(buffer_id).unwrap().clone()); // Ensure the overwritten bytes are marked as initialized so // they don't need to be nulled prior to mapping or binding. 
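// A rough, self-contained sketch of what the `initialization_status` drain above
// achieves: byte ranges that have just been written no longer count as
// uninitialized, so they will not be zeroed before a later map or binding.
// The real wgpu-core tracker is more elaborate; this only shows the idea.
use std::ops::Range;

#[derive(Default)]
struct InitTrackerSketch {
    uninitialized: Vec<Range<u64>>,
}

impl InitTrackerSketch {
    fn drain(&mut self, written: Range<u64>) {
        let mut remaining = Vec::new();
        for r in self.uninitialized.drain(..) {
            if r.end <= written.start || r.start >= written.end {
                remaining.push(r); // no overlap: still uninitialized
            } else {
                if r.start < written.start {
                    remaining.push(r.start..written.start); // left remainder
                }
                if r.end > written.end {
                    remaining.push(written.end..r.end); // right remainder
                }
            }
        }
        self.uninitialized = remaining;
    }
}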
{ drop(buffer_guard); - let mut buffer_guard = hub.buffers.write(device_token).0; - let dst = buffer_guard.get_mut(buffer_id).unwrap(); + let dst = hub.buffers.get(buffer_id).unwrap(); dst.initialization_status + .write() .drain(buffer_offset..(buffer_offset + src_buffer_size)); } @@ -574,10 +590,10 @@ impl Global { profiling::scope!("Queue::write_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) + + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; #[cfg(feature = "trace")] @@ -597,9 +613,9 @@ impl Global { return Ok(()); } - let (mut texture_guard, _) = hub.textures.write(&mut token); // For clear we need write access to the texture. TODO: Can we acquire write lock later? + let texture_guard = hub.textures.read(); let dst = texture_guard - .get_mut(destination.texture) + .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(destination.texture))?; if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) { @@ -613,7 +629,7 @@ impl Global { let (hal_copy_size, array_layer_count) = validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; - let (selector, dst_base) = extract_texture_selector(destination, size, dst)?; + let (selector, dst_base) = extract_texture_selector(destination, size, &dst)?; if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -671,7 +687,9 @@ impl Global { let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; let mut trackers = device.trackers.lock(); - let encoder = device.pending_writes.activate(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); // If the copy does not fully cover the layers, we need to initialize to // zero *first* as we don't keep track of partial texture layer inits. @@ -684,12 +702,13 @@ impl Global { } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; - if dst.initialization_status.mips[destination.mip_level as usize] + let mut dst_initialization_status = dst.initialization_status.write(); + if dst_initialization_status.mips[destination.mip_level as usize] .check(init_layer_range.clone()) .is_some() { if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) { - for layer_range in dst.initialization_status.mips[destination.mip_level as usize] + for layer_range in dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range) .collect::>>() { @@ -703,35 +722,41 @@ impl Global { encoder, &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .map_err(QueueWriteError::from)?; } } else { - dst.initialization_status.mips[destination.mip_level as usize] + dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range); } } // Re-get `dst` immutably here, so that the mutable borrow of the - // `texture_guard.get_mut` above ends in time for the `clear_texture` + // `texture_guard.get` above ends in time for the `clear_texture` // call above. Since we've held `texture_guard` the whole time, we know // the texture hasn't gone away in the mean time, so we can unwrap. 
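// A rough worked example of the rule in the comment above (hypothetical numbers).
// Texture initialization is only tracked per mip level and array layer, so a copy
// that covers just part of a layer forces that whole layer to be zero-initialized
// first; a copy covering the full layer can simply be marked as initializing it.
#[derive(Clone, Copy)]
struct Extent2d { width: u32, height: u32 }

fn copy_is_partial(copy: Extent2d, mip: Extent2d) -> bool {
    copy.width != mip.width || copy.height != mip.height
}

fn partial_init_example() {
    let mip0 = Extent2d { width: 256, height: 256 };
    // 16x16 write into a 256x256 layer: the layer is zeroed before the copy.
    assert!(copy_is_partial(Extent2d { width: 16, height: 16 }, mip0));
    // Full-layer write: no zeroing needed, just mark the layer initialized.
    assert!(!copy_is_partial(mip0, mip0));
}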
let dst = texture_guard.get(destination.texture).unwrap(); let transition = trackers .textures .set_single( - dst, + &dst, destination.texture, selector, hal::TextureUses::COPY_DST, ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.life_guard.use_at(device.active_submission_index + 1); + dst.info.use_at( + device + .active_submission_index + .fetch_add(1, Ordering::Relaxed), + ); let dst_raw = dst .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; @@ -743,7 +768,7 @@ impl Global { // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, stage_size)?; + prepare_staging_buffer(device.raw.as_ref().unwrap(), stage_size)?; if stage_bytes_per_row == bytes_per_row { profiling::scope!("copy aligned"); @@ -778,8 +803,8 @@ impl Global { } } - if let Err(e) = unsafe { staging_buffer.flush(&device.raw) } { - device.pending_writes.consume(staging_buffer); + if let Err(e) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { + pending_writes.consume(Arc::new(staging_buffer)); return Err(e.into()); } @@ -799,21 +824,20 @@ impl Global { } }); let barrier = hal::BufferBarrier { - buffer: &staging_buffer.raw, + buffer: staging_buffer.raw.as_ref(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }; unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); + encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); encoder.transition_buffers(iter::once(barrier)); encoder.copy_buffer_to_texture(&staging_buffer.raw, dst_raw, regions); } - device.pending_writes.consume(staging_buffer); - device - .pending_writes + pending_writes.consume(Arc::new(staging_buffer)); + pending_writes .dst_textures - .insert(destination.texture); + .insert(destination.texture, dst.clone()); Ok(()) } @@ -829,10 +853,10 @@ impl Global { profiling::scope!("Queue::copy_external_image_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) + + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 { @@ -859,8 +883,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let (mut texture_guard, _) = hub.textures.write(&mut token); // For clear we need write access to the texture. TODO: Can we acquire write lock later? - let dst = texture_guard.get_mut(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).unwrap(); if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { return Err( @@ -935,7 +958,7 @@ impl Global { extract_texture_selector(&destination.to_untagged(), &size, dst)?; let mut trackers = device.trackers.lock(); - let encoder = device.pending_writes.activate(); + let encoder = device.pending_writes.lock().activate(); // If the copy does not fully cover the layers, we need to initialize to // zero *first* as we don't keep track of partial texture layer inits. 
@@ -948,12 +971,13 @@ impl Global { } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; - if dst.initialization_status.mips[destination.mip_level as usize] + let dst_initialization_status = dst.initialization_status.write(); + if dst_initialization_status.mips[destination.mip_level as usize] .check(init_layer_range.clone()) .is_some() { if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) { - for layer_range in dst.initialization_status.mips[destination.mip_level as usize] + for layer_range in dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range) .collect::>>() { @@ -972,7 +996,7 @@ impl Global { .map_err(QueueWriteError::from)?; } } else { - dst.initialization_status.mips[destination.mip_level as usize] + dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range); } } @@ -989,7 +1013,9 @@ impl Global { ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.life_guard.use_at(device.active_submission_index + 1); + dst.life_guard + .read() + .use_at(device.active_submission_index + 1); let dst_raw = dst .inner @@ -1029,34 +1055,30 @@ impl Global { let (submit_index, callbacks) = { let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) + let device = hub + .devices + .get(queue_id) .map_err(|_| DeviceError::Invalid)?; - device.temp_suspected.clear(); - device.active_submission_index += 1; - let submit_index = device.active_submission_index; + + let submit_index = device + .active_submission_index + .fetch_add(1, Ordering::Relaxed); let mut active_executions = Vec::new(); let mut used_surface_textures = track::TextureUsageScope::new(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); { - let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token); + let mut command_buffer_guard = hub.command_buffers.write(); if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); - let (render_bundle_guard, mut token) = hub.render_bundles.read(&mut token); - let (_, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); - let (mut buffer_guard, mut token) = hub.buffers.write(&mut token); - let (mut texture_guard, mut token) = hub.textures.write(&mut token); - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, mut token) = hub.samplers.read(&mut token); - let (query_set_guard, _) = hub.query_sets.read(&mut token); + let buffer_guard = hub.buffers.write(); + let texture_guard = hub.textures.write(); + let texture_view_guard = hub.texture_views.read(); + let sampler_guard = hub.samplers.read(); //Note: locking the trackers has to be done after the storages let mut trackers = device.trackers.lock(); @@ -1083,125 +1105,164 @@ impl Global { if let Some(ref trace) = device.trace { trace.lock().add(Action::Submit( submit_index, - cmdbuf.commands.take().unwrap(), + cmdbuf.data.lock().commands.take().unwrap(), )); } if !cmdbuf.is_finished() { - device.destroy_command_buffer(cmdbuf); + if let Ok(cmdbuf) = Arc::try_unwrap(cmdbuf) { + device.destroy_command_buffer(cmdbuf); + } else { + panic!( + "Command 
buffer cannot be destroyed because is still in use" + ); + } continue; } // optimize the tracked states // cmdbuf.trackers.optimize(); - - // update submission IDs - for id in cmdbuf.trackers.buffers.used() { - let buffer = &mut buffer_guard[id]; - let raw_buf = match buffer.raw { - Some(ref raw) => raw, - None => { - return Err(QueueSubmitError::DestroyedBuffer(id.0)); - } - }; - if !buffer.life_guard.use_at(submit_index) { - if let BufferMapState::Active { .. } = buffer.map_state { - log::warn!("Dropped buffer has a pending mapping."); - unsafe { device.raw.unmap_buffer(raw_buf) } + { + let cmd_buf_data = cmdbuf.data.lock(); + let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers; + + // update submission IDs + for buffer in cmd_buf_trackers.buffers.used_resources() { + let id = buffer.info.id(); + let raw_buf = match &buffer.raw { + Some(ref raw) => raw, + None => { + return Err(QueueSubmitError::DestroyedBuffer(id.0)); + } + }; + buffer.info.use_at(submit_index); + if buffer.is_unique() { + if let BufferMapState::Active { .. } = *buffer.map_state.lock() + { + log::warn!("Dropped buffer has a pending mapping."); + unsafe { + device.raw.as_ref().unwrap().unmap_buffer(raw_buf) + } .map_err(DeviceError::from)?; - } - device.temp_suspected.buffers.push(id); - } else { - match buffer.map_state { - BufferMapState::Idle => (), - _ => return Err(QueueSubmitError::BufferStillMapped(id.0)), + } + device.temp_suspected.lock().buffers.push(buffer.clone()); + } else { + match *buffer.map_state.lock() { + BufferMapState::Idle => (), + _ => return Err(QueueSubmitError::BufferStillMapped(id.0)), + } } } - } - for id in cmdbuf.trackers.textures.used() { - let texture = &mut texture_guard[id]; - let should_extend = match texture.inner { - TextureInner::Native { raw: None } => { - return Err(QueueSubmitError::DestroyedTexture(id.0)); + for texture in cmd_buf_trackers.textures.used_resources() { + let id = texture.info.id(); + let should_extend = match texture.inner.as_ref().unwrap() { + TextureInner::Native { raw: None } => { + return Err(QueueSubmitError::DestroyedTexture(id.0)); + } + TextureInner::Native { raw: Some(_) } => false, + TextureInner::Surface { ref has_work, .. } => { + has_work.store(true, Ordering::Relaxed); + true + } + }; + texture.info.use_at(submit_index); + if texture.is_unique() { + device.temp_suspected.lock().textures.push(texture.clone()); } - TextureInner::Native { raw: Some(_) } => false, - TextureInner::Surface { - ref mut has_work, .. - } => { - *has_work = true; - true + if should_extend { + unsafe { + used_surface_textures + .merge_single( + &*texture_guard, + id, + None, + hal::TextureUses::PRESENT, + ) + .unwrap(); + }; } - }; - if !texture.life_guard.use_at(submit_index) { - device.temp_suspected.textures.push(id); - } - if should_extend { - unsafe { - let ref_count = cmdbuf.trackers.textures.get_ref_count(id); - used_surface_textures - .merge_single( - &*texture_guard, - id, - None, - ref_count, - hal::TextureUses::PRESENT, - ) - .unwrap(); - }; - } - } - for id in cmdbuf.trackers.views.used() { - if !texture_view_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.texture_views.push(id); - } - } - for id in cmdbuf.trackers.bind_groups.used() { - let bg = &bind_group_guard[id]; - if !bg.life_guard.use_at(submit_index) { - device.temp_suspected.bind_groups.push(id); } - // We need to update the submission indices for the contained - // state-less (!) 
resources as well, so that they don't get - // deleted too early if the parent bind group goes out of scope. - for sub_id in bg.used.views.used() { - texture_view_guard[sub_id].life_guard.use_at(submit_index); - } - for sub_id in bg.used.samplers.used() { - sampler_guard[sub_id].life_guard.use_at(submit_index); - } - } - // assert!(cmdbuf.trackers.samplers.is_empty()); - for id in cmdbuf.trackers.compute_pipelines.used() { - if !compute_pipe_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.compute_pipelines.push(id); + for texture_view in cmd_buf_trackers.views.used_resources() { + texture_view.info.use_at(submit_index); + if texture_view.is_unique() { + device + .temp_suspected + .lock() + .texture_views + .push(texture_view.clone()); + } } - } - for id in cmdbuf.trackers.render_pipelines.used() { - if !render_pipe_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.render_pipelines.push(id); + for bg in cmd_buf_trackers.bind_groups.used_resources() { + bg.info.use_at(submit_index); + // We need to update the submission indices for the contained + // state-less (!) resources as well, so that they don't get + // deleted too early if the parent bind group goes out of scope. + for sub_id in bg.used.views.used() { + texture_view_guard[sub_id].info.use_at(submit_index); + } + for sub_id in bg.used.samplers.used() { + sampler_guard[sub_id].info.use_at(submit_index); + } + if bg.is_unique() { + device.temp_suspected.lock().bind_groups.push(bg.clone()); + } } - } - for id in cmdbuf.trackers.query_sets.used() { - if !query_set_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.query_sets.push(id); + // assert!(cmd_buf_trackers.samplers.is_empty()); + for compute_pipeline in + cmd_buf_trackers.compute_pipelines.used_resources() + { + compute_pipeline.info.use_at(submit_index); + if compute_pipeline.is_unique() { + device + .temp_suspected + .lock() + .compute_pipelines + .push(compute_pipeline.clone()); + } } - } - for id in cmdbuf.trackers.bundles.used() { - let bundle = &render_bundle_guard[id]; - if !bundle.life_guard.use_at(submit_index) { - device.temp_suspected.render_bundles.push(id); + for render_pipeline in + cmd_buf_trackers.render_pipelines.used_resources() + { + render_pipeline.info.use_at(submit_index); + if render_pipeline.is_unique() { + device + .temp_suspected + .lock() + .render_pipelines + .push(render_pipeline.clone()); + } } - // We need to update the submission indices for the contained - // state-less (!) resources as well, excluding the bind groups. - // They don't get deleted too early if the bundle goes out of scope. - for sub_id in bundle.used.render_pipelines.used() { - render_pipe_guard[sub_id].life_guard.use_at(submit_index); + for query_set in cmd_buf_trackers.query_sets.used_resources() { + query_set.info.use_at(submit_index); + if query_set.is_unique() { + device + .temp_suspected + .lock() + .query_sets + .push(query_set.clone()); + } } - for sub_id in bundle.used.query_sets.used() { - query_set_guard[sub_id].life_guard.use_at(submit_index); + for bundle in cmd_buf_trackers.bundles.used_resources() { + bundle.info.use_at(submit_index); + // We need to update the submission indices for the contained + // state-less (!) resources as well, excluding the bind groups. + // They don't get deleted too early if the bundle goes out of scope. 
+ for render_pipeline in bundle.used.render_pipelines.used_resources() + { + render_pipeline.info.use_at(submit_index); + } + for query_set in bundle.used.query_sets.used_resources() { + query_set.info.use_at(submit_index); + } + if bundle.is_unique() { + device + .temp_suspected + .lock() + .render_bundles + .push(bundle.clone()); + } } } - - let mut baked = cmdbuf.into_baked(); + let mut baked = cmdbuf.from_arc_into_baked(); // execute resource transitions unsafe { baked @@ -1211,10 +1272,10 @@ impl Global { }; log::trace!("Stitching command buffer {:?} before submission", cmb_id); baked - .initialize_buffer_memory(&mut *trackers, &mut *buffer_guard) + .initialize_buffer_memory(&mut *trackers, &*buffer_guard) .map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?; baked - .initialize_texture_memory(&mut *trackers, &mut *texture_guard, device) + .initialize_texture_memory(&mut *trackers, &*texture_guard, &device) .map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?; //Note: stateless trackers are not merged: // device already knows these resources exist. @@ -1244,7 +1305,7 @@ impl Global { .set_from_usage_scope(&*texture_guard, &used_surface_textures); let texture_barriers = trackers.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) + pending.into_hal(&tex) }); let present = unsafe { baked.encoder.transition_textures(texture_barriers); @@ -1264,46 +1325,24 @@ impl Global { log::trace!("Device after submission {}", submit_index); } - let super::Device { - ref mut pending_writes, - ref mut queue, - ref mut fence, - .. - } = *device; - { - // TODO: These blocks have a few organizational issues, and - // should be refactored. - // - // 1) It's similar to the code we have per-command-buffer - // (at the begin and end) Maybe we can merge some? - // - // 2) It's doing the extra locking unconditionally. Maybe we - // can only do so if any surfaces are being written to? - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (mut texture_guard, _) = hub.textures.write(&mut token); + let texture_guard = hub.textures.read(); used_surface_textures.set_size(texture_guard.len()); - - for &id in pending_writes.dst_textures.iter() { - let texture = texture_guard.get_mut(id).unwrap(); - match texture.inner { + for (&id, texture) in pending_writes.dst_textures.iter() { + match texture.inner.as_ref().unwrap() { TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id)); } TextureInner::Native { raw: Some(_) } => {} - TextureInner::Surface { - ref mut has_work, .. - } => { - *has_work = true; - let ref_count = texture.life_guard.add_ref(); + TextureInner::Surface { ref has_work, .. 
} => { + has_work.store(true, Ordering::Relaxed); unsafe { used_surface_textures .merge_single( &*texture_guard, id::Valid(id), None, - &ref_count, hal::TextureUses::PRESENT, ) .unwrap() @@ -1320,7 +1359,7 @@ impl Global { .set_from_usage_scope(&*texture_guard, &used_surface_textures); let texture_barriers = trackers.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) + pending.into_hal(&tex) }); unsafe { @@ -1341,24 +1380,30 @@ impl Global { ) .collect::>(); unsafe { - queue - .submit(&refs, Some((fence, submit_index))) + device + .queue + .as_ref() + .unwrap() + .submit( + &refs, + Some((device.fence.lock().as_mut().unwrap(), submit_index)), + ) .map_err(DeviceError::from)?; } } profiling::scope!("cleanup"); - if let Some(pending_execution) = device.pending_writes.post_submit( - &device.command_allocator, - &device.raw, - &device.queue, + if let Some(pending_execution) = pending_writes.post_submit( + device.command_allocator.lock().as_mut().unwrap(), + device.raw.as_ref().unwrap(), + device.queue.as_ref().unwrap(), ) { active_executions.push(pending_execution); } // this will register the new submission to the life time tracker - let mut pending_write_resources = mem::take(&mut device.pending_writes.temp_resources); - device.lock_life(&mut token).track_submission( + let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources); + device.lock_life().track_submission( submit_index, pending_write_resources.drain(..), active_executions, @@ -1366,7 +1411,7 @@ impl Global { // This will schedule destruction of all resources that are no longer needed // by the user but used in the command stream, among other things. - let (closures, _) = match device.maintain(hub, wgt::Maintain::Poll, &mut token) { + let (closures, _) = match device.maintain(hub, wgt::Maintain::Poll) { Ok(closures) => closures, Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)), Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu), @@ -1375,9 +1420,8 @@ impl Global { // pending_write_resources has been drained, so it's empty, but we // want to retain its heap allocation. 
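(Editor's aside, not part of the patch: the comment above about keeping the heap allocation relies on `Vec::drain`, which removes elements without shrinking the vector's capacity. A minimal standalone Rust sketch of that behaviour, with made-up names:)

```rust
fn main() {
    let mut temp: Vec<String> = Vec::with_capacity(16);
    temp.push("staging buffer".to_owned());

    // drain(..) moves the elements out but keeps the Vec's heap allocation,
    // so the same buffer can be handed back and reused on the next submission.
    for item in temp.drain(..) {
        drop(item);
    }

    assert!(temp.is_empty());
    assert!(temp.capacity() >= 16); // allocation retained
}
```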
- device.pending_writes.temp_resources = pending_write_resources; - device.temp_suspected.clear(); - device.lock_life(&mut token).post_submit(); + pending_writes.temp_resources = pending_write_resources; + device.lock_life().post_submit(); (submit_index, closures) }; @@ -1396,10 +1440,8 @@ impl Global { queue_id: id::QueueId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - match device_guard.get(queue_id) { - Ok(device) => Ok(unsafe { device.queue.get_timestamp_period() }), + match hub.devices.get(queue_id) { + Ok(device) => Ok(unsafe { device.queue.as_ref().unwrap().get_timestamp_period() }), Err(_) => Err(InvalidQueue), } } @@ -1412,10 +1454,8 @@ impl Global { //TODO: flush pending writes let closure_opt = { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - match device_guard.get(queue_id) { - Ok(device) => device.lock_life(&mut token).add_work_done_closure(closure), + match hub.devices.get(queue_id) { + Ok(device) => device.lock_life().add_work_done_closure(closure), Err(_) => return Err(InvalidQueue), } }; diff --git a/wgpu-core/src/error.rs b/wgpu-core/src/error.rs index 596595245b..2b144fdc29 100644 --- a/wgpu-core/src/error.rs +++ b/wgpu-core/src/error.rs @@ -1,10 +1,7 @@ use core::fmt; use std::error::Error; -use crate::{ - gfx_select, - hub::{Global, IdentityManagerFactory}, -}; +use crate::{gfx_select, global::Global, identity::IdentityManagerFactory}; pub struct ErrorFormatter<'a> { writer: &'a mut dyn fmt::Write, @@ -20,7 +17,7 @@ impl<'a> ErrorFormatter<'a> { writeln!(self.writer, " note: {note}").expect("Error formatting error"); } - pub fn label(&mut self, label_key: &str, label_value: &str) { + pub fn label(&mut self, label_key: &str, label_value: &String) { if !label_key.is_empty() && !label_value.is_empty() { self.note(&format!("{label_key} = `{label_value}`")); } diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs new file mode 100644 index 0000000000..6e224511d9 --- /dev/null +++ b/wgpu-core/src/global.rs @@ -0,0 +1,168 @@ +use std::sync::Arc; + +use crate::{ + hal_api::HalApi, + hub::{HubReport, Hubs}, + id::SurfaceId, + identity::GlobalIdentityHandlerFactory, + instance::{Instance, Surface}, + registry::Registry, + storage::{Element, StorageReport}, +}; + +#[derive(Debug)] +pub struct GlobalReport { + pub surfaces: StorageReport, + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + pub vulkan: Option, + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + pub metal: Option, + #[cfg(all(feature = "dx12", windows))] + pub dx12: Option, + #[cfg(all(feature = "dx11", windows))] + pub dx11: Option, + #[cfg(feature = "gles")] + pub gl: Option, +} + +pub struct Global { + pub instance: Instance, + pub surfaces: Registry, + pub(crate) hubs: Hubs, +} + +impl Global { + pub fn new(name: &str, factory: G, instance_desc: wgt::InstanceDescriptor) -> Self { + profiling::scope!("Global::new"); + Self { + instance: Instance::new(name, instance_desc), + surfaces: Registry::without_backend(&factory, "Surface"), + hubs: Hubs::new(&factory), + } + } + + /// # Safety + /// + /// Refer to the creation of wgpu-hal Instance for every backend. 
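(Editor's sketch, not part of the patch: a hypothetical example of driving the new `global::Global` type introduced above. Only `Global::new`, `IdentityManagerFactory`, and `generate_report` are taken from the code in this patch; the `wgpu_core`/`wgpu_types` import paths and the `Default` impl for `InstanceDescriptor` are assumptions.)

```rust
use wgpu_core::{global::Global, identity::IdentityManagerFactory};
use wgpu_types as wgt;

fn main() {
    // Build a Global with the default identity factory, as a frontend would.
    let global = Global::new(
        "example",
        IdentityManagerFactory,
        wgt::InstanceDescriptor::default(),
    );

    // generate_report() walks the surface registry and each enabled backend hub,
    // returning occupancy counts for every Storage.
    let report = global.generate_report();
    println!("surfaces: {:?}", report.surfaces);
}
```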
+ pub unsafe fn from_hal_instance( + name: &str, + factory: G, + hal_instance: A::Instance, + ) -> Self { + profiling::scope!("Global::new"); + Self { + instance: A::create_instance_from_hal(name, hal_instance), + surfaces: Registry::without_backend(&factory, "Surface"), + hubs: Hubs::new(&factory), + } + } + + /// # Safety + /// + /// - The raw instance handle returned must not be manually destroyed. + pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { + A::instance_as_hal(&self.instance) + } + + /// # Safety + /// + /// - The raw handles obtained from the Instance must not be manually destroyed + pub unsafe fn from_instance(factory: G, instance: Instance) -> Self { + profiling::scope!("Global::new"); + Self { + instance, + surfaces: Registry::without_backend(&factory, "Surface"), + hubs: Hubs::new(&factory), + } + } + + pub fn clear_backend(&self, _dummy: ()) { + let hub = A::hub(self); + let mut surfaces_locked = self.surfaces.write(); + // this is used for tests, which keep the adapter + hub.clear(&mut surfaces_locked, false); + } + + pub fn generate_report(&self) -> GlobalReport { + GlobalReport { + surfaces: self.surfaces.generate_report(), + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + vulkan: if self.instance.vulkan.is_some() { + Some(self.hubs.vulkan.generate_report()) + } else { + None + }, + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + metal: if self.instance.metal.is_some() { + Some(self.hubs.metal.generate_report()) + } else { + None + }, + #[cfg(all(feature = "dx12", windows))] + dx12: if self.instance.dx12.is_some() { + Some(self.hubs.dx12.generate_report()) + } else { + None + }, + #[cfg(all(feature = "dx11", windows))] + dx11: if self.instance.dx11.is_some() { + Some(self.hubs.dx11.generate_report()) + } else { + None + }, + #[cfg(feature = "gles")] + gl: if self.instance.gl.is_some() { + Some(self.hubs.gl.generate_report()) + } else { + None + }, + } + } +} + +impl Drop for Global { + fn drop(&mut self) { + profiling::scope!("Global::drop"); + log::info!("Dropping Global"); + let mut surfaces_locked = self.surfaces.write(); + + // destroy hubs before the instance gets dropped + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + { + self.hubs.vulkan.clear(&mut surfaces_locked, true); + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + { + self.hubs.metal.clear(&mut surface_guard, true); + } + #[cfg(all(feature = "dx12", windows))] + { + self.hubs.dx12.clear(&mut surfaces_locked, true); + } + #[cfg(all(feature = "dx11", windows))] + { + self.hubs.dx11.clear(&mut surfaces_locked, true); + } + #[cfg(feature = "gles")] + { + self.hubs.gl.clear(&mut surfaces_locked, true); + } + + // destroy surfaces + for element in surfaces_locked.map.drain(..) 
{ + if let Element::Occupied(arc_surface, _) = element { + if let Ok(surface) = Arc::try_unwrap(arc_surface) { + self.instance.destroy_surface(surface); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } + } + } + } +} + +#[cfg(test)] +fn _test_send_sync(global: &Global) { + fn test_internal(_: T) {} + test_internal(global) +} diff --git a/wgpu-core/src/hal_api.rs b/wgpu-core/src/hal_api.rs new file mode 100644 index 0000000000..71151303a4 --- /dev/null +++ b/wgpu-core/src/hal_api.rs @@ -0,0 +1,138 @@ +use wgt::Backend; + +use crate::{ + global::Global, + hub::Hub, + identity::GlobalIdentityHandlerFactory, + instance::{HalSurface, Instance, Surface}, +}; + +pub trait HalApi: hal::Api { + const VARIANT: Backend; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; + fn hub(global: &Global) -> &Hub; + fn get_surface(surface: &Surface) -> Option<&HalSurface>; +} + +impl HalApi for hal::api::Empty { + const VARIANT: Backend = Backend::Empty; + fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance { + unimplemented!("called empty api") + } + fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> { + unimplemented!("called empty api") + } + fn hub(_: &Global) -> &Hub { + unimplemented!("called empty api") + } + fn get_surface(_: &Surface) -> Option<&HalSurface> { + unimplemented!("called empty api") + } +} + +#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] +impl HalApi for hal::api::Vulkan { + const VARIANT: Backend = Backend::Vulkan; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + Instance { + name: name.to_owned(), + vulkan: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.vulkan.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.vulkan + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.vulkan.as_ref() + } +} + +#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] +impl HalApi for hal::api::Metal { + const VARIANT: Backend = Backend::Metal; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + Instance { + name: name.to_owned(), + metal: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.metal.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.metal + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.metal.as_ref() + } +} + +#[cfg(all(feature = "dx12", windows))] +impl HalApi for hal::api::Dx12 { + const VARIANT: Backend = Backend::Dx12; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + Instance { + name: name.to_owned(), + dx12: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.dx12.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.dx12 + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.dx12.as_ref() + } +} + +#[cfg(all(feature = "dx11", windows))] +impl HalApi for hal::api::Dx11 { + const VARIANT: Backend = Backend::Dx11; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + Instance { + name: name.to_owned(), + dx11: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> 
Option<&Self::Instance> { + instance.dx11.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.dx11 + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.dx11.as_ref() + } +} + +#[cfg(feature = "gles")] +impl HalApi for hal::api::Gles { + const VARIANT: Backend = Backend::Gl; + fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { + #[allow(clippy::needless_update)] + Instance { + name: name.to_owned(), + gl: Some(hal_instance), + ..Default::default() + } + } + fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { + instance.gl.as_ref() + } + fn hub(global: &Global) -> &Hub { + &global.hubs.gl + } + fn get_surface(surface: &Surface) -> Option<&HalSurface> { + surface.gl.as_ref() + } +} diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 655a47ad18..b349d4ca37 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -153,665 +153,17 @@ use crate::{ binding_model::{BindGroup, BindGroupLayout, PipelineLayout}, command::{CommandBuffer, RenderBundle}, device::Device, + hal_api::HalApi, id, - instance::{Adapter, HalSurface, Instance, Surface}, + identity::GlobalIdentityHandlerFactory, + instance::{Adapter, Surface}, pipeline::{ComputePipeline, RenderPipeline, ShaderModule}, - resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureClearMode, TextureView}, - Epoch, Index, + registry::Registry, + resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, + storage::{Element, Storage, StorageReport}, }; -use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use wgt::Backend; - -#[cfg(debug_assertions)] -use std::cell::Cell; -use std::{fmt::Debug, marker::PhantomData, mem, ops}; - -/// A simple structure to allocate [`Id`] identifiers. -/// -/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`] -/// marks an id as dead; it will never be returned again by `alloc`. -/// -/// Use `IdentityManager::default` to construct new instances. -/// -/// `IdentityManager` returns `Id`s whose index values are suitable for use as -/// indices into a `Storage` that holds those ids' referents: -/// -/// - Every live id has a distinct index value. Each live id's index selects a -/// distinct element in the vector. -/// -/// - `IdentityManager` prefers low index numbers. If you size your vector to -/// accommodate the indices produced here, the vector's length will reflect -/// the highwater mark of actual occupancy. -/// -/// - `IdentityManager` reuses the index values of freed ids before returning -/// ids with new index values. Freed vector entries get reused. -/// -/// See the module-level documentation for an overview of how this -/// fits together. -/// -/// [`Id`]: crate::id::Id -/// [`Backend`]: wgt::Backend; -/// [`alloc`]: IdentityManager::alloc -/// [`free`]: IdentityManager::free -#[derive(Debug, Default)] -pub struct IdentityManager { - /// Available index values. If empty, then `epochs.len()` is the next index - /// to allocate. - free: Vec, - - /// The next or currently-live epoch value associated with each `Id` index. - /// - /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any - /// id with the same index but an older epoch is dead. - /// - /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its - /// next `Id`. - epochs: Vec, -} - -impl IdentityManager { - /// Allocate a fresh, never-before-seen id with the given `backend`. 
- /// - /// The backend is incorporated into the id, so that ids allocated with - /// different `backend` values are always distinct. - pub fn alloc(&mut self, backend: Backend) -> I { - match self.free.pop() { - Some(index) => I::zip(index, self.epochs[index as usize], backend), - None => { - let epoch = 1; - let id = I::zip(self.epochs.len() as Index, epoch, backend); - self.epochs.push(epoch); - id - } - } - } - - /// Free `id`. It will never be returned from `alloc` again. - pub fn free(&mut self, id: I) { - let (index, epoch, _backend) = id.unzip(); - let pe = &mut self.epochs[index as usize]; - assert_eq!(*pe, epoch); - // If the epoch reaches EOL, the index doesn't go - // into the free list, will never be reused again. - if epoch < id::EPOCH_MASK { - *pe = epoch + 1; - self.free.push(index); - } - } -} - -/// An entry in a `Storage::map` table. -#[derive(Debug)] -enum Element { - /// There are no live ids with this index. - Vacant, - - /// There is one live id with this index, allocated at the given - /// epoch. - Occupied(T, Epoch), - - /// Like `Occupied`, but an error occurred when creating the - /// resource. - /// - /// The given `String` is the resource's descriptor label. - Error(Epoch, String), -} - -#[derive(Clone, Debug, Default)] -pub struct StorageReport { - pub num_occupied: usize, - pub num_vacant: usize, - pub num_error: usize, - pub element_size: usize, -} - -impl StorageReport { - pub fn is_empty(&self) -> bool { - self.num_occupied + self.num_vacant + self.num_error == 0 - } -} - -#[derive(Clone, Debug)] -pub(crate) struct InvalidId; - -/// A table of `T` values indexed by the id type `I`. -/// -/// The table is represented as a vector indexed by the ids' index -/// values, so you should use an id allocator like `IdentityManager` -/// that keeps the index values dense and close to zero. -#[derive(Debug)] -pub struct Storage { - map: Vec>, - kind: &'static str, - _phantom: PhantomData, -} - -impl ops::Index> for Storage { - type Output = T; - fn index(&self, id: id::Valid) -> &T { - self.get(id.0).unwrap() - } -} - -impl ops::IndexMut> for Storage { - fn index_mut(&mut self, id: id::Valid) -> &mut T { - self.get_mut(id.0).unwrap() - } -} - -impl Storage { - pub(crate) fn contains(&self, id: I) -> bool { - let (index, epoch, _) = id.unzip(); - match self.map.get(index as usize) { - Some(&Element::Vacant) => false, - Some(&Element::Occupied(_, storage_epoch) | &Element::Error(storage_epoch, _)) => { - storage_epoch == epoch - } - None => false, - } - } - - /// Attempts to get a reference to an item behind a potentially invalid ID. - /// - /// Returns [`None`] if there is an epoch mismatch, or the entry is empty. - /// - /// This function is primarily intended for the `as_hal` family of functions - /// where you may need to fallibly get a object backed by an id that could - /// be in a different hub. - pub(crate) fn try_get(&self, id: I) -> Result, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch), - Some(&Element::Vacant) => return Ok(None), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - None => return Err(InvalidId), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Get a reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. 
- pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - None => return Err(InvalidId), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Get a mutable reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get_mut(index as usize) { - Some(&mut Element::Occupied(ref mut v, epoch)) => (Ok(v), epoch), - Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), - Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T { - match self.map[id as usize] { - Element::Occupied(ref v, _) => v, - Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), - Element::Error(_, _) => panic!(""), - } - } - - pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { - let (index, _, _) = id.unzip(); - match self.map.get(index as usize) { - Some(&Element::Error(_, ref label)) => label, - _ => "", - } - } - - fn insert_impl(&mut self, index: usize, element: Element) { - if index >= self.map.len() { - self.map.resize_with(index + 1, || Element::Vacant); - } - match std::mem::replace(&mut self.map[index], element) { - Element::Vacant => {} - _ => panic!("Index {index:?} is already occupied"), - } - } - - pub(crate) fn insert(&mut self, id: I, value: T) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Occupied(value, epoch)) - } - - pub(crate) fn insert_error(&mut self, id: I, label: &str) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) - } - - pub(crate) fn force_replace(&mut self, id: I, value: T) { - let (index, epoch, _) = id.unzip(); - self.map[index as usize] = Element::Occupied(value, epoch); - } - - pub(crate) fn remove(&mut self, id: I) -> Option { - let (index, epoch, _) = id.unzip(); - match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { - Element::Occupied(value, storage_epoch) => { - assert_eq!(epoch, storage_epoch); - Some(value) - } - Element::Error(..) => None, - Element::Vacant => panic!("Cannot remove a vacant resource"), - } - } - - // Prevents panic on out of range access, allows Vacant elements. 
- pub(crate) fn _try_remove(&mut self, id: I) -> Option { - let (index, epoch, _) = id.unzip(); - if index as usize >= self.map.len() { - None - } else if let Element::Occupied(value, storage_epoch) = - std::mem::replace(&mut self.map[index as usize], Element::Vacant) - { - assert_eq!(epoch, storage_epoch); - Some(value) - } else { - None - } - } - - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator { - self.map - .iter() - .enumerate() - .filter_map(move |(index, x)| match *x { - Element::Occupied(ref value, storage_epoch) => { - Some((I::zip(index as Index, storage_epoch, backend), value)) - } - _ => None, - }) - } - - pub(crate) fn len(&self) -> usize { - self.map.len() - } - - fn generate_report(&self) -> StorageReport { - let mut report = StorageReport { - element_size: mem::size_of::(), - ..Default::default() - }; - for element in self.map.iter() { - match *element { - Element::Occupied(..) => report.num_occupied += 1, - Element::Vacant => report.num_vacant += 1, - Element::Error(..) => report.num_error += 1, - } - } - report - } -} - -/// Type system for enforcing the lock order on shared HUB structures. -/// If type A implements `Access`, that means we are allowed to proceed -/// with locking resource `B` after we lock `A`. -/// -/// The implementations basically describe the edges in a directed graph -/// of lock transitions. As long as it doesn't have loops, we can have -/// multiple concurrent paths on this graph (from multiple threads) without -/// deadlocks, i.e. there is always a path whose next resource is not locked -/// by some other path, at any time. -pub trait Access {} - -pub enum Root {} -//TODO: establish an order instead of declaring all the pairs. -impl Access for Root {} -impl Access for Root {} -impl Access for Instance {} -impl Access> for Root {} -impl Access> for Surface {} -impl Access> for Root {} -impl Access> for Surface {} -impl Access> for Adapter {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for RenderBundle {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for PipelineLayout {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for PipelineLayout {} -impl Access> for CommandBuffer {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Device {} -impl Access> for CommandBuffer {} -impl Access> for Device {} -impl Access> for BindGroup {} -impl Access> for Device {} -impl Access> for BindGroup {} -impl Access> for ComputePipeline {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for CommandBuffer {} -impl Access> for RenderPipeline {} -impl Access> for ComputePipeline {} -impl Access> for Sampler {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for BindGroup {} -impl Access> for CommandBuffer {} -impl Access> for ComputePipeline {} -impl Access> for RenderPipeline {} -impl Access> for QuerySet {} -impl Access> for Device {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Buffer {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Texture {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for TextureView {} - -#[cfg(debug_assertions)] -thread_local! { - static ACTIVE_TOKEN: Cell = Cell::new(0); -} - -/// A permission token to lock resource `T` or anything after it, -/// as defined by the `Access` implementations. 
-/// -/// Note: there can only be one non-borrowed `Token` alive on a thread -/// at a time, which is enforced by `ACTIVE_TOKEN`. -pub(crate) struct Token<'a, T: 'a> { - level: PhantomData<&'a T>, -} - -impl<'a, T> Token<'a, T> { - fn new() -> Self { - #[cfg(debug_assertions)] - ACTIVE_TOKEN.with(|active| { - let old = active.get(); - assert_ne!(old, 0, "Root token was dropped"); - active.set(old + 1); - }); - Self { level: PhantomData } - } -} - -impl Token<'static, Root> { - pub fn root() -> Self { - #[cfg(debug_assertions)] - ACTIVE_TOKEN.with(|active| { - assert_eq!(0, active.replace(1), "Root token is already active"); - }); - - Self { level: PhantomData } - } -} - -impl<'a, T> Drop for Token<'a, T> { - fn drop(&mut self) { - #[cfg(debug_assertions)] - ACTIVE_TOKEN.with(|active| { - let old = active.get(); - active.set(old - 1); - }); - } -} - -/// A type that can build true ids from proto-ids, and free true ids. -/// -/// For some implementations, the true id is based on the proto-id. -/// The caller is responsible for providing well-allocated proto-ids. -/// -/// For other implementations, the proto-id carries no information -/// (it's `()`, say), and this `IdentityHandler` type takes care of -/// allocating a fresh true id. -/// -/// See the module-level documentation for details. -pub trait IdentityHandler: Debug { - /// The type of proto-id consumed by this filter, to produce a true id. - type Input: Clone + Debug; - - /// Given a proto-id value `id`, return a true id for `backend`. - fn process(&self, id: Self::Input, backend: Backend) -> I; - - /// Free the true id `id`. - fn free(&self, id: I); -} - -impl IdentityHandler for Mutex { - type Input = (); - fn process(&self, _id: Self::Input, backend: Backend) -> I { - self.lock().alloc(backend) - } - fn free(&self, id: I) { - self.lock().free(id) - } -} - -/// A type that can produce [`IdentityHandler`] filters for ids of type `I`. -/// -/// See the module-level documentation for details. -pub trait IdentityHandlerFactory { - /// The type of filter this factory constructs. - /// - /// "Filter" and "handler" seem to both mean the same thing here: - /// something that can produce true ids from proto-ids. - type Filter: IdentityHandler; - - /// Create an [`IdentityHandler`] implementation that can - /// transform proto-ids into ids of type `I`. - /// - /// [`IdentityHandler`]: IdentityHandler - fn spawn(&self) -> Self::Filter; -} - -/// A global identity handler factory based on [`IdentityManager`]. -/// -/// Each of this type's `IdentityHandlerFactory::spawn` methods -/// returns a `Mutex>`, which allocates fresh `I` -/// ids itself, and takes `()` as its proto-id type. -#[derive(Debug)] -pub struct IdentityManagerFactory; - -impl IdentityHandlerFactory for IdentityManagerFactory { - type Filter = Mutex; - fn spawn(&self) -> Self::Filter { - Mutex::new(IdentityManager::default()) - } -} - -/// A factory that can build [`IdentityHandler`]s for all resource -/// types. 
-pub trait GlobalIdentityHandlerFactory: - IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory - + IdentityHandlerFactory -{ -} - -impl GlobalIdentityHandlerFactory for IdentityManagerFactory {} - -pub type Input = <>::Filter as IdentityHandler>::Input; - -pub trait Resource { - const TYPE: &'static str; - fn life_guard(&self) -> &crate::LifeGuard; - fn label(&self) -> &str { - #[cfg(debug_assertions)] - return &self.life_guard().label; - #[cfg(not(debug_assertions))] - return ""; - } -} - -#[derive(Debug)] -pub struct Registry> { - identity: F::Filter, - data: RwLock>, - backend: Backend, -} - -impl> Registry { - fn new(backend: Backend, factory: &F) -> Self { - Self { - identity: factory.spawn(), - data: RwLock::new(Storage { - map: Vec::new(), - kind: T::TYPE, - _phantom: PhantomData, - }), - backend, - } - } - - fn without_backend(factory: &F, kind: &'static str) -> Self { - Self { - identity: factory.spawn(), - data: RwLock::new(Storage { - map: Vec::new(), - kind, - _phantom: PhantomData, - }), - backend: Backend::Empty, - } - } -} - -#[must_use] -pub(crate) struct FutureId<'a, I: id::TypedId, T> { - id: I, - data: &'a RwLock>, -} - -impl FutureId<'_, I, T> { - #[cfg(feature = "trace")] - pub fn id(&self) -> I { - self.id - } - - pub fn into_id(self) -> I { - self.id - } - - pub fn assign<'a, A: Access>(self, value: T, _: &'a mut Token) -> id::Valid { - self.data.write().insert(self.id, value); - id::Valid(self.id) - } - - pub fn assign_error<'a, A: Access>(self, label: &str, _: &'a mut Token) -> I { - self.data.write().insert_error(self.id, label); - self.id - } -} - -impl> Registry { - pub(crate) fn prepare( - &self, - id_in: >::Input, - ) -> FutureId { - FutureId { - id: self.identity.process(id_in, self.backend), - data: &self.data, - } - } - - pub(crate) fn read<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockReadGuard<'a, Storage>, Token<'a, T>) { - (self.data.read(), Token::new()) - } - - pub(crate) fn write<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockWriteGuard<'a, Storage>, Token<'a, T>) { - (self.data.write(), Token::new()) - } - - pub fn unregister_locked(&self, id: I, guard: &mut Storage) -> Option { - let value = guard.remove(id); - //Note: careful about the order here! - self.identity.free(id); - //Returning None is legal if it's an error ID - value - } - - pub(crate) fn unregister<'a, A: Access>( - &self, - id: I, - _token: &'a mut Token, - ) -> (Option, Token<'a, T>) { - let value = self.data.write().remove(id); - //Note: careful about the order here! 
- self.identity.free(id); - //Returning None is legal if it's an error ID - (value, Token::new()) - } - - pub fn label_for_resource(&self, id: I) -> String { - let guard = self.data.read(); - - let type_name = guard.kind; - match guard.get(id) { - Ok(res) => { - let label = res.label(); - if label.is_empty() { - format!("<{}-{:?}>", type_name, id.unzip()) - } else { - label.to_string() - } - } - Err(_) => format!( - "", - type_name, - guard.label_for_invalid_id(id) - ), - } - } -} +use std::{fmt::Debug, marker::PhantomData}; #[derive(Debug)] pub struct HubReport { @@ -839,22 +191,22 @@ impl HubReport { } pub struct Hub { - pub adapters: Registry, id::AdapterId, F>, - pub devices: Registry, id::DeviceId, F>, - pub pipeline_layouts: Registry, id::PipelineLayoutId, F>, - pub shader_modules: Registry, id::ShaderModuleId, F>, - pub bind_group_layouts: Registry, id::BindGroupLayoutId, F>, - pub bind_groups: Registry, id::BindGroupId, F>, - pub command_buffers: Registry, id::CommandBufferId, F>, - pub render_bundles: Registry, id::RenderBundleId, F>, - pub render_pipelines: Registry, id::RenderPipelineId, F>, - pub compute_pipelines: Registry, id::ComputePipelineId, F>, - pub query_sets: Registry, id::QuerySetId, F>, - pub buffers: Registry, id::BufferId, F>, - pub staging_buffers: Registry, id::StagingBufferId, F>, - pub textures: Registry, id::TextureId, F>, - pub texture_views: Registry, id::TextureViewId, F>, - pub samplers: Registry, id::SamplerId, F>, + pub adapters: Registry, F>, + pub devices: Registry, F>, + pub pipeline_layouts: Registry, F>, + pub shader_modules: Registry, F>, + pub bind_group_layouts: Registry, F>, + pub bind_groups: Registry, F>, + pub command_buffers: Registry, F>, + pub render_bundles: Registry, F>, + pub render_pipelines: Registry, F>, + pub compute_pipelines: Registry, F>, + pub query_sets: Registry, F>, + pub buffers: Registry, F>, + pub staging_buffers: Registry, F>, + pub textures: Registry, F>, + pub texture_views: Registry, F>, + pub samplers: Registry, F>, } impl Hub { @@ -882,195 +234,100 @@ impl Hub { //TODO: instead of having a hacky `with_adapters` parameter, // we should have `clear_device(device_id)` that specifically destroys // everything related to a logical device. - fn clear(&self, surface_guard: &mut Storage, with_adapters: bool) { - use crate::resource::TextureInner; - use hal::{Device as _, Surface as _}; + pub(crate) fn clear( + &self, + surface_guard: &Storage, + with_adapters: bool, + ) { + use hal::Surface; - let mut devices = self.devices.data.write(); + let mut devices = self.devices.write(); for element in devices.map.iter_mut() { - if let Element::Occupied(ref mut device, _) = *element { + if let Element::Occupied(ref device, _) = *element { device.prepare_to_die(); } } - // destroy command buffers first, since otherwise DX12 isn't happy - for element in self.command_buffers.data.write().map.drain(..) { - if let Element::Occupied(command_buffer, _) = element { - let device = &devices[command_buffer.device_id.value]; - device.destroy_command_buffer(command_buffer); - } - } - - for element in self.samplers.data.write().map.drain(..) { - if let Element::Occupied(sampler, _) = element { - unsafe { - devices[sampler.device_id.value] - .raw - .destroy_sampler(sampler.raw); - } - } - } - - for element in self.texture_views.data.write().map.drain(..) 
{ - if let Element::Occupied(texture_view, _) = element { - let device = &devices[texture_view.device_id.value]; - unsafe { - device.raw.destroy_texture_view(texture_view.raw); - } - } - } - - for element in self.textures.data.write().map.drain(..) { - if let Element::Occupied(texture, _) = element { - let device = &devices[texture.device_id.value]; - if let TextureInner::Native { raw: Some(raw) } = texture.inner { - unsafe { - device.raw.destroy_texture(raw); - } - } - if let TextureClearMode::RenderPass { clear_views, .. } = texture.clear_mode { - for view in clear_views { - unsafe { - device.raw.destroy_texture_view(view); - } - } - } - } - } - for element in self.buffers.data.write().map.drain(..) { - if let Element::Occupied(buffer, _) = element { - //TODO: unmap if needed - devices[buffer.device_id.value].destroy_buffer(buffer); - } - } - for element in self.bind_groups.data.write().map.drain(..) { - if let Element::Occupied(bind_group, _) = element { - let device = &devices[bind_group.device_id.value]; - unsafe { - device.raw.destroy_bind_group(bind_group.raw); - } - } - } - - for element in self.shader_modules.data.write().map.drain(..) { - if let Element::Occupied(module, _) = element { - let device = &devices[module.device_id.value]; - unsafe { - device.raw.destroy_shader_module(module.raw); - } - } - } - for element in self.bind_group_layouts.data.write().map.drain(..) { - if let Element::Occupied(bgl, _) = element { - let device = &devices[bgl.device_id.value]; - unsafe { - device.raw.destroy_bind_group_layout(bgl.raw); - } - } - } - for element in self.pipeline_layouts.data.write().map.drain(..) { - if let Element::Occupied(pipeline_layout, _) = element { - let device = &devices[pipeline_layout.device_id.value]; - unsafe { - device.raw.destroy_pipeline_layout(pipeline_layout.raw); - } - } - } - for element in self.compute_pipelines.data.write().map.drain(..) { - if let Element::Occupied(pipeline, _) = element { - let device = &devices[pipeline.device_id.value]; - unsafe { - device.raw.destroy_compute_pipeline(pipeline.raw); - } - } - } - for element in self.render_pipelines.data.write().map.drain(..) 
{ - if let Element::Occupied(pipeline, _) = element { - let device = &devices[pipeline.device_id.value]; - unsafe { - device.raw.destroy_render_pipeline(pipeline.raw); - } - } - } - - for element in surface_guard.map.iter_mut() { - if let Element::Occupied(ref mut surface, _epoch) = *element { + self.command_buffers.write().map.clear(); + self.samplers.write().map.clear(); + self.texture_views.write().map.clear(); + self.textures.write().map.clear(); + self.buffers.write().map.clear(); + self.bind_groups.write().map.clear(); + self.shader_modules.write().map.clear(); + self.bind_group_layouts.write().map.clear(); + self.pipeline_layouts.write().map.clear(); + self.compute_pipelines.write().map.clear(); + self.render_pipelines.write().map.clear(); + self.query_sets.write().map.clear(); + + for element in surface_guard.map.iter() { + if let Element::Occupied(ref surface, _epoch) = *element { if surface .presentation + .lock() .as_ref() .map_or(wgt::Backend::Empty, |p| p.backend()) != A::VARIANT { continue; } - if let Some(present) = surface.presentation.take() { - let device = &devices[present.device_id.value]; - let suf = A::get_surface_mut(surface); + if let Some(present) = surface.presentation.lock().take() { + let device = &devices[present.device_id]; + let suf = A::get_surface(surface); unsafe { - suf.unwrap().raw.unconfigure(&device.raw); + suf.unwrap().raw.unconfigure(device.raw.as_ref().unwrap()); //TODO: we could destroy the surface here } } } } - for element in self.query_sets.data.write().map.drain(..) { - if let Element::Occupied(query_set, _) = element { - let device = &devices[query_set.device_id.value]; - unsafe { - device.raw.destroy_query_set(query_set.raw); - } - } - } - - for element in devices.map.drain(..) { - if let Element::Occupied(device, _) = element { - device.dispose(); - } - } + devices.map.clear(); if with_adapters { drop(devices); - self.adapters.data.write().map.clear(); + self.adapters.write().map.clear(); } } pub fn generate_report(&self) -> HubReport { HubReport { - adapters: self.adapters.data.read().generate_report(), - devices: self.devices.data.read().generate_report(), - pipeline_layouts: self.pipeline_layouts.data.read().generate_report(), - shader_modules: self.shader_modules.data.read().generate_report(), - bind_group_layouts: self.bind_group_layouts.data.read().generate_report(), - bind_groups: self.bind_groups.data.read().generate_report(), - command_buffers: self.command_buffers.data.read().generate_report(), - render_bundles: self.render_bundles.data.read().generate_report(), - render_pipelines: self.render_pipelines.data.read().generate_report(), - compute_pipelines: self.compute_pipelines.data.read().generate_report(), - query_sets: self.query_sets.data.read().generate_report(), - buffers: self.buffers.data.read().generate_report(), - textures: self.textures.data.read().generate_report(), - texture_views: self.texture_views.data.read().generate_report(), - samplers: self.samplers.data.read().generate_report(), + adapters: self.adapters.generate_report(), + devices: self.devices.generate_report(), + pipeline_layouts: self.pipeline_layouts.generate_report(), + shader_modules: self.shader_modules.generate_report(), + bind_group_layouts: self.bind_group_layouts.generate_report(), + bind_groups: self.bind_groups.generate_report(), + command_buffers: self.command_buffers.generate_report(), + render_bundles: self.render_bundles.generate_report(), + render_pipelines: self.render_pipelines.generate_report(), + compute_pipelines: 
self.compute_pipelines.generate_report(), + query_sets: self.query_sets.generate_report(), + buffers: self.buffers.generate_report(), + textures: self.textures.generate_report(), + texture_views: self.texture_views.generate_report(), + samplers: self.samplers.generate_report(), } } } pub struct Hubs { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: Hub, + pub(crate) vulkan: Hub, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: Hub, + pub(crate) metal: Hub, #[cfg(all(feature = "dx12", windows))] - dx12: Hub, + pub(crate) dx12: Hub, #[cfg(all(feature = "dx11", windows))] - dx11: Hub, + pub(crate) dx11: Hub, #[cfg(feature = "gles")] - gl: Hub, + pub(crate) gl: Hub, + _phantom_data: PhantomData, } impl Hubs { - fn new(factory: &F) -> Self { + pub(crate) fn new(factory: &F) -> Self { Self { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: Hub::new(factory), @@ -1082,322 +339,7 @@ impl Hubs { dx11: Hub::new(factory), #[cfg(feature = "gles")] gl: Hub::new(factory), + _phantom_data: PhantomData, } } } - -#[derive(Debug)] -pub struct GlobalReport { - pub surfaces: StorageReport, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub vulkan: Option, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub metal: Option, - #[cfg(all(feature = "dx12", windows))] - pub dx12: Option, - #[cfg(all(feature = "dx11", windows))] - pub dx11: Option, - #[cfg(feature = "gles")] - pub gl: Option, -} - -pub struct Global { - pub instance: Instance, - pub surfaces: Registry, - hubs: Hubs, -} - -impl Global { - pub fn new(name: &str, factory: G, instance_desc: wgt::InstanceDescriptor) -> Self { - profiling::scope!("Global::new"); - Self { - instance: Instance::new(name, instance_desc), - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - /// # Safety - /// - /// Refer to the creation of wgpu-hal Instance for every backend. - pub unsafe fn from_hal_instance( - name: &str, - factory: G, - hal_instance: A::Instance, - ) -> Self { - profiling::scope!("Global::new"); - Self { - instance: A::create_instance_from_hal(name, hal_instance), - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - /// # Safety - /// - /// - The raw instance handle returned must not be manually destroyed. 
- pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { - A::instance_as_hal(&self.instance) - } - - /// # Safety - /// - /// - The raw handles obtained from the Instance must not be manually destroyed - pub unsafe fn from_instance(factory: G, instance: Instance) -> Self { - profiling::scope!("Global::new"); - Self { - instance, - surfaces: Registry::without_backend(&factory, "Surface"), - hubs: Hubs::new(&factory), - } - } - - pub fn clear_backend(&self, _dummy: ()) { - let mut surface_guard = self.surfaces.data.write(); - let hub = A::hub(self); - // this is used for tests, which keep the adapter - hub.clear(&mut surface_guard, false); - } - - pub fn generate_report(&self) -> GlobalReport { - GlobalReport { - surfaces: self.surfaces.data.read().generate_report(), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: if self.instance.vulkan.is_some() { - Some(self.hubs.vulkan.generate_report()) - } else { - None - }, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: if self.instance.metal.is_some() { - Some(self.hubs.metal.generate_report()) - } else { - None - }, - #[cfg(all(feature = "dx12", windows))] - dx12: if self.instance.dx12.is_some() { - Some(self.hubs.dx12.generate_report()) - } else { - None - }, - #[cfg(all(feature = "dx11", windows))] - dx11: if self.instance.dx11.is_some() { - Some(self.hubs.dx11.generate_report()) - } else { - None - }, - #[cfg(feature = "gles")] - gl: if self.instance.gl.is_some() { - Some(self.hubs.gl.generate_report()) - } else { - None - }, - } - } -} - -impl Drop for Global { - fn drop(&mut self) { - profiling::scope!("Global::drop"); - log::info!("Dropping Global"); - let mut surface_guard = self.surfaces.data.write(); - - // destroy hubs before the instance gets dropped - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - { - self.hubs.vulkan.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - { - self.hubs.metal.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "dx12", windows))] - { - self.hubs.dx12.clear(&mut surface_guard, true); - } - #[cfg(all(feature = "dx11", windows))] - { - self.hubs.dx11.clear(&mut surface_guard, true); - } - #[cfg(feature = "gles")] - { - self.hubs.gl.clear(&mut surface_guard, true); - } - - // destroy surfaces - for element in surface_guard.map.drain(..) 
{ - if let Element::Occupied(surface, _) = element { - self.instance.destroy_surface(surface); - } - } - } -} - -pub trait HalApi: hal::Api { - const VARIANT: Backend; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; - fn hub(global: &Global) -> &Hub; - fn get_surface(surface: &Surface) -> Option<&HalSurface>; - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface>; -} - -impl HalApi for hal::api::Empty { - const VARIANT: Backend = Backend::Empty; - fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance { - unimplemented!("called empty api") - } - fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> { - unimplemented!("called empty api") - } - fn hub(_: &Global) -> &Hub { - unimplemented!("called empty api") - } - fn get_surface(_: &Surface) -> Option<&HalSurface> { - unimplemented!("called empty api") - } - fn get_surface_mut(_: &mut Surface) -> Option<&mut HalSurface> { - unimplemented!("called empty api") - } -} - -#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] -impl HalApi for hal::api::Vulkan { - const VARIANT: Backend = Backend::Vulkan; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - vulkan: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.vulkan.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.vulkan - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.vulkan.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.vulkan.as_mut() - } -} - -#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] -impl HalApi for hal::api::Metal { - const VARIANT: Backend = Backend::Metal; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - metal: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.metal.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.metal - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.metal.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.metal.as_mut() - } -} - -#[cfg(all(feature = "dx12", windows))] -impl HalApi for hal::api::Dx12 { - const VARIANT: Backend = Backend::Dx12; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - dx12: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.dx12.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.dx12 - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx12.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.dx12.as_mut() - } -} - -#[cfg(all(feature = "dx11", windows))] -impl HalApi for hal::api::Dx11 { - const VARIANT: Backend = Backend::Dx11; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - Instance { - name: name.to_owned(), - dx11: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.dx11.as_ref() - } - fn hub(global: &Global) -> &Hub { - 
&global.hubs.dx11 - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx11.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.dx11.as_mut() - } -} - -#[cfg(feature = "gles")] -impl HalApi for hal::api::Gles { - const VARIANT: Backend = Backend::Gl; - fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance { - #[allow(clippy::needless_update)] - Instance { - name: name.to_owned(), - gl: Some(hal_instance), - ..Default::default() - } - } - fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { - instance.gl.as_ref() - } - fn hub(global: &Global) -> &Hub { - &global.hubs.gl - } - fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.gl.as_ref() - } - fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> { - surface.gl.as_mut() - } -} - -#[cfg(test)] -fn _test_send_sync(global: &Global) { - fn test_internal(_: T) {} - test_internal(global) -} - -#[test] -fn test_epoch_end_of_life() { - use id::TypedId as _; - let mut man = IdentityManager::default(); - man.epochs.push(id::EPOCH_MASK); - man.free.push(0); - let id1 = man.alloc::(Backend::Empty); - assert_eq!(id1.unzip().0, 0); - man.free(id1); - let id2 = man.alloc::(Backend::Empty); - // confirm that the index 0 is no longer re-used - assert_eq!(id2.unzip().0, 1); -} diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs new file mode 100644 index 0000000000..5f7cc0dc5a --- /dev/null +++ b/wgpu-core/src/identity.rs @@ -0,0 +1,183 @@ +use parking_lot::Mutex; +use wgt::Backend; + +use crate::{id, Epoch, Index}; +use std::fmt::Debug; + +/// A simple structure to allocate [`Id`] identifiers. +/// +/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`] +/// marks an id as dead; it will never be returned again by `alloc`. +/// +/// Use `IdentityManager::default` to construct new instances. +/// +/// `IdentityManager` returns `Id`s whose index values are suitable for use as +/// indices into a `Storage` that holds those ids' referents: +/// +/// - Every live id has a distinct index value. Each live id's index selects a +/// distinct element in the vector. +/// +/// - `IdentityManager` prefers low index numbers. If you size your vector to +/// accommodate the indices produced here, the vector's length will reflect +/// the highwater mark of actual occupancy. +/// +/// - `IdentityManager` reuses the index values of freed ids before returning +/// ids with new index values. Freed vector entries get reused. +/// +/// See the module-level documentation for an overview of how this +/// fits together. +/// +/// [`Id`]: crate::id::Id +/// [`Backend`]: wgt::Backend; +/// [`alloc`]: IdentityManager::alloc +/// [`free`]: IdentityManager::free +#[derive(Debug, Default)] +pub struct IdentityManager { + /// Available index values. If empty, then `epochs.len()` is the next index + /// to allocate. + free: Vec, + + /// The next or currently-live epoch value associated with each `Id` index. + /// + /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any + /// id with the same index but an older epoch is dead. + /// + /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its + /// next `Id`. + epochs: Vec, +} + +impl IdentityManager { + /// Allocate a fresh, never-before-seen id with the given `backend`. + /// + /// The backend is incorporated into the id, so that ids allocated with + /// different `backend` values are always distinct. 
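+    ///
+    /// A minimal usage sketch (illustrative only; `id::AdapterId` is just one
+    /// concrete `id::TypedId`, mirroring the unit test at the bottom of this
+    /// file):
+    ///
+    /// ```ignore
+    /// let mut man = IdentityManager::default();
+    /// let id = man.alloc::<id::AdapterId>(wgt::Backend::Empty);
+    /// // ... hand `id` out, use it as a `Storage` index ...
+    /// man.free(id); // this exact id value is never returned by `alloc` again
+    /// ```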
+ pub fn alloc(&mut self, backend: Backend) -> I { + match self.free.pop() { + Some(index) => I::zip(index, self.epochs[index as usize], backend), + None => { + let epoch = 1; + let id = I::zip(self.epochs.len() as Index, epoch, backend); + self.epochs.push(epoch); + id + } + } + } + + /// Free `id`. It will never be returned from `alloc` again. + pub fn free(&mut self, id: I) { + let (index, epoch, _backend) = id.unzip(); + let pe = &mut self.epochs[index as usize]; + assert_eq!(*pe, epoch); + // If the epoch reaches EOL, the index doesn't go + // into the free list, will never be reused again. + if epoch < id::EPOCH_MASK { + *pe = epoch + 1; + self.free.push(index); + } + } +} + +/// A type that can build true ids from proto-ids, and free true ids. +/// +/// For some implementations, the true id is based on the proto-id. +/// The caller is responsible for providing well-allocated proto-ids. +/// +/// For other implementations, the proto-id carries no information +/// (it's `()`, say), and this `IdentityHandler` type takes care of +/// allocating a fresh true id. +/// +/// See the module-level documentation for details. +pub trait IdentityHandler: Debug { + /// The type of proto-id consumed by this filter, to produce a true id. + type Input: Clone + Debug; + + /// Given a proto-id value `id`, return a true id for `backend`. + fn process(&self, id: Self::Input, backend: Backend) -> I; + + /// Free the true id `id`. + fn free(&self, id: I); +} + +impl IdentityHandler for Mutex { + type Input = (); + fn process(&self, _id: Self::Input, backend: Backend) -> I { + self.lock().alloc(backend) + } + fn free(&self, id: I) { + self.lock().free(id) + } +} + +/// A type that can produce [`IdentityHandler`] filters for ids of type `I`. +/// +/// See the module-level documentation for details. +pub trait IdentityHandlerFactory { + /// The type of filter this factory constructs. + /// + /// "Filter" and "handler" seem to both mean the same thing here: + /// something that can produce true ids from proto-ids. + type Filter: IdentityHandler; + + /// Create an [`IdentityHandler`] implementation that can + /// transform proto-ids into ids of type `I`. + /// + /// [`IdentityHandler`]: IdentityHandler + fn spawn(&self) -> Self::Filter; +} + +/// A global identity handler factory based on [`IdentityManager`]. +/// +/// Each of this type's `IdentityHandlerFactory::spawn` methods +/// returns a `Mutex>`, which allocates fresh `I` +/// ids itself, and takes `()` as its proto-id type. +#[derive(Debug)] +pub struct IdentityManagerFactory; + +impl IdentityHandlerFactory for IdentityManagerFactory { + type Filter = Mutex; + fn spawn(&self) -> Self::Filter { + Mutex::new(IdentityManager::default()) + } +} + +/// A factory that can build [`IdentityHandler`]s for all resource +/// types. 
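+///
+/// [`IdentityManagerFactory`] implements this trait, so it is the value a
+/// caller typically passes when constructing a `Global`. A hedged sketch of
+/// that call site (`instance_desc` stands in for a `wgt::InstanceDescriptor`
+/// the caller already has):
+///
+/// ```ignore
+/// // `Global` lives in `crate::global`.
+/// let global = Global::new("example", IdentityManagerFactory, instance_desc);
+/// ```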
+pub trait GlobalIdentityHandlerFactory: + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory + + IdentityHandlerFactory +{ +} + +impl GlobalIdentityHandlerFactory for IdentityManagerFactory {} + +pub type Input = <>::Filter as IdentityHandler>::Input; + +#[test] +fn test_epoch_end_of_life() { + use id::TypedId as _; + let mut man = IdentityManager::default(); + man.epochs.push(id::EPOCH_MASK); + man.free.push(0); + let id1 = man.alloc::(Backend::Empty); + assert_eq!(id1.unzip().0, 0); + man.free(id1); + let id2 = man.alloc::(Backend::Empty); + // confirm that the index 0 is no longer re-used + assert_eq!(id2.unzip().0, 1); +} diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index d987f45edc..803e733c42 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1,11 +1,17 @@ +use std::sync::Arc; + use crate::{ - device::{Device, DeviceDescriptor}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token}, + device::{device::Device, DeviceDescriptor}, + global::Global, + hal_api::HalApi, id::{AdapterId, DeviceId, SurfaceId, Valid}, + identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, - LabelHelpers, LifeGuard, Stored, DOWNLEVEL_WARNING_MESSAGE, + resource::{Resource, ResourceInfo}, + LabelHelpers, DOWNLEVEL_WARNING_MESSAGE, }; +use parking_lot::Mutex; use wgt::{Backend, Backends, PowerPreference}; use hal::{Adapter as _, Instance as _}; @@ -15,7 +21,7 @@ pub type RequestAdapterOptions = wgt::RequestAdapterOptions; type HalInstance = ::Instance; //TODO: remove this pub struct HalSurface { - pub raw: A::Surface, + pub raw: Arc, //pub acquired_texture: Option, } @@ -109,7 +115,11 @@ impl Instance { ) { unsafe { if let Some(suf) = surface { - instance.as_ref().unwrap().destroy_surface(suf.raw); + if let Ok(raw) = Arc::try_unwrap(suf.raw) { + instance.as_ref().unwrap().destroy_surface(raw); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } } } } @@ -127,7 +137,8 @@ impl Instance { } pub struct Surface { - pub(crate) presentation: Option, + pub(crate) presentation: Mutex>, + pub(crate) info: ResourceInfo, #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] pub vulkan: Option>, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] @@ -140,15 +151,15 @@ pub struct Surface { pub gl: Option>, } -impl crate::hub::Resource for Surface { +impl Resource for Surface { const TYPE: &'static str = "Surface"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { - "" + fn label(&self) -> String { + String::from("") } } @@ -173,7 +184,7 @@ impl Surface { pub struct Adapter { pub(crate) raw: hal::ExposedAdapter, - life_guard: LifeGuard, + info: ResourceInfo, } impl Adapter { @@ -192,7 +203,7 @@ impl Adapter { Self { raw, - life_guard: LifeGuard::new(""), + info: ResourceInfo::new(""), } } @@ -287,10 +298,7 @@ impl Adapter { let caps = &self.raw.capabilities; Device::new( open, - Stored { - value: Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + Valid(self_id), caps.alignments.clone(), caps.downlevel.clone(), desc, @@ -356,11 
+364,11 @@ impl Adapter { } } -impl crate::hub::Resource for Adapter { +impl Resource for Adapter { const TYPE: &'static str = "Adapter"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -451,7 +459,7 @@ impl Global { inst.as_ref().and_then(|inst| unsafe { match inst.create_surface(display_handle, window_handle) { Ok(raw) => Some(HalSurface { - raw, + raw: Arc::new(raw), //acquired_texture: None, }), Err(e) => { @@ -463,7 +471,8 @@ impl Global { } let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: init::(&self.instance.vulkan, display_handle, window_handle), #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] @@ -476,8 +485,7 @@ impl Global { gl: init::(&self.instance.gl, display_handle, window_handle), }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); id.0 } @@ -493,14 +501,14 @@ impl Global { profiling::scope!("Instance::create_surface_metal"); let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), metal: self.instance.metal.as_ref().map(|inst| HalSurface { - raw: { + raw: Arc::new( // we don't want to link to metal-rs for this #[allow(clippy::transmute_ptr_to_ref)] - inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }) - }, - //acquired_texture: None, + inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }), + ), //acquired_texture: None, }), #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: None, @@ -508,8 +516,7 @@ impl Global { gl: None, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); id.0 } @@ -522,21 +529,21 @@ impl Global { profiling::scope!("Instance::create_surface_webgl_canvas"); let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), gl: self .instance .gl .as_ref() .map(|inst| { Ok(HalSurface { - raw: inst.create_surface_from_canvas(canvas)?, + raw: Arc::new(inst.create_surface_from_canvas(canvas)?), }) }) .transpose()?, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); Ok(id.0) } @@ -549,21 +556,21 @@ impl Global { profiling::scope!("Instance::create_surface_webgl_offscreen_canvas"); let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), gl: self .instance .gl .as_ref() .map(|inst| { Ok(HalSurface { - raw: inst.create_surface_from_offscreen_canvas(canvas)?, + raw: Arc::new(inst.create_surface_from_offscreen_canvas(canvas)?), }) }) .transpose()?, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); Ok(id.0) } @@ -579,19 +586,19 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_visual"); let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: None, dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe { 
inst.create_surface_from_visual(visual as _) }, + raw: Arc::new(unsafe { inst.create_surface_from_visual(visual as _) }), }), dx11: None, #[cfg(feature = "gles")] gl: None, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); id.0 } @@ -607,27 +614,30 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_surface_handle"); let surface = Surface { - presentation: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: None, dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe { inst.create_surface_from_surface_handle(surface_handle) }, + raw: Arc::new(unsafe { inst.create_surface_from_surface_handle(surface_handle) }), }), dx11: None, #[cfg(feature = "gles")] gl: None, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); + let (id, _) = self.surfaces.prepare(id_in).assign(surface); id.0 } pub fn surface_drop(&self, id: SurfaceId) { profiling::scope!("Surface::drop"); - let mut token = Token::root(); - let (surface, _) = self.surfaces.unregister(id, &mut token); - self.instance.destroy_surface(surface.unwrap()); + let surface = self.surfaces.unregister(id); + if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { + self.instance.destroy_surface(surface); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } } fn enumerate( @@ -648,16 +658,12 @@ impl Global { profiling::scope!("enumerating", &*format!("{:?}", A::VARIANT)); let hub = HalApi::hub(self); - let mut token = Token::root(); let hal_adapters = unsafe { inst.enumerate_adapters() }; for raw in hal_adapters { let adapter = Adapter::new(raw); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); - let id = hub - .adapters - .prepare(id_backend.clone()) - .assign(adapter, &mut token); + let (id, _) = hub.adapters.prepare(id_backend.clone()).assign(adapter); list.push(id.0); } } @@ -703,13 +709,12 @@ impl Global { None } None => { - let mut token = Token::root(); let adapter = Adapter::new(list.swap_remove(*selected)); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); - let id = HalApi::hub(self) + let (id, _) = HalApi::hub(self) .adapters .prepare(new_id.unwrap()) - .assign(adapter, &mut token); + .assign(adapter); Some(id.0) } } @@ -756,16 +761,16 @@ impl Global { } } - let mut token = Token::root(); - let (surface_guard, _) = self.surfaces.read(&mut token); let compatible_surface = desc .compatible_surface .map(|id| { - surface_guard + self.surfaces .get(id) .map_err(|_| RequestAdapterError::InvalidSurface(id)) + .map(|v| v) }) .transpose()?; + let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); let mut device_types = Vec::new(); #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] @@ -814,9 +819,6 @@ impl Global { &mut device_types, ); - // need to free the token to be used by `select` - drop(surface_guard); - drop(token); if device_types.is_empty() { return Err(RequestAdapterError::NotFound); } @@ -893,20 +895,19 @@ impl Global { ) -> AdapterId { profiling::scope!("Instance::create_adapter_from_hal"); - let mut token = Token::root(); let fid = A::hub(self).adapters.prepare(input); match A::VARIANT { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => fid.assign(Adapter::new(hal_adapter), &mut token).0, + Backend::Vulkan => 
fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => fid.assign(Adapter::new(hal_adapter), &mut token).0, + Backend::Metal => fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => fid.assign(Adapter::new(hal_adapter), &mut token).0, + Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => fid.assign(Adapter::new(hal_adapter), &mut token).0, + Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(feature = "gles")] - Backend::Gl => fid.assign(Adapter::new(hal_adapter), &mut token).0, + Backend::Gl => fid.assign(Adapter::new(hal_adapter)).0 .0, _ => unreachable!(), } } @@ -916,9 +917,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.info.clone()) .map_err(|_| InvalidAdapter) @@ -930,9 +930,8 @@ impl Global { format: wgt::TextureFormat, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.get_texture_format_features(format)) .map_err(|_| InvalidAdapter) @@ -943,9 +942,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.features) .map_err(|_| InvalidAdapter) @@ -956,9 +954,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.capabilities.limits.clone()) .map_err(|_| InvalidAdapter) @@ -969,9 +966,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.capabilities.downlevel.clone()) .map_err(|_| InvalidAdapter) @@ -982,9 +978,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - let adapter = adapter_guard.get(adapter_id).map_err(|_| InvalidAdapter)?; + + let adapter = hub.adapters.get(adapter_id).map_err(|_| InvalidAdapter)?; Ok(unsafe { adapter.raw.adapter.get_presentation_timestamp() }) } @@ -993,16 +988,15 @@ impl Global { profiling::scope!("Adapter::drop"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut adapter_guard, _) = hub.adapters.write(&mut token); + let mut adapters_locked = hub.adapters.write(); - let free = match adapter_guard.get_mut(adapter_id) { - Ok(adapter) => adapter.life_guard.ref_count.take().unwrap().load() == 1, + let free = match adapters_locked.get(adapter_id) { + Ok(adapter) => Arc::strong_count(adapter) == 1, Err(_) => true, }; if free { hub.adapters - .unregister_locked(adapter_id, &mut *adapter_guard); + .unregister_locked(adapter_id, &mut *adapters_locked); } } } @@ -1018,12 +1012,10 @@ impl Global { profiling::scope!("Adapter::request_device"); let hub = A::hub(self); - let mut token = Token::root(); let 
fid = hub.devices.prepare(id_in); let error = loop { - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let adapter = match adapter_guard.get(adapter_id) { + let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; @@ -1031,11 +1023,11 @@ impl Global { Ok(device) => device, Err(e) => break e, }; - let id = fid.assign(device, &mut token); + let (id, _) = fid.assign(device); return (id.0, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1054,12 +1046,10 @@ impl Global { profiling::scope!("Adapter::create_device_from_hal"); let hub = A::hub(self); - let mut token = Token::root(); let fid = hub.devices.prepare(id_in); let error = loop { - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let adapter = match adapter_guard.get(adapter_id) { + let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; @@ -1068,11 +1058,11 @@ impl Global { Ok(device) => device, Err(e) => break e, }; - let id = fid.assign(device, &mut token); + let (id, _) = fid.assign(device); return (id.0, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } } diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index fc80f0017d..83158f8b2b 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -42,21 +42,24 @@ pub mod command; mod conv; pub mod device; pub mod error; +pub mod global; +pub mod hal_api; pub mod hub; pub mod id; +pub mod identity; mod init_tracker; pub mod instance; pub mod pipeline; pub mod present; +pub mod registry; pub mod resource; +pub mod storage; mod track; mod validation; pub use hal::{api, MAX_BIND_GROUPS, MAX_COLOR_ATTACHMENTS, MAX_VERTEX_BUFFERS}; -use atomic::{AtomicUsize, Ordering}; - -use std::{borrow::Cow, os::raw::c_char, ptr, sync::atomic}; +use std::{borrow::Cow, os::raw::c_char}; /// The index of a queue submission. /// @@ -82,155 +85,6 @@ impl<'a> LabelHelpers<'a> for Label<'a> { } } -/// Reference count object that is 1:1 with each reference. -/// -/// All the clones of a given `RefCount` point to the same -/// heap-allocated atomic reference count. When the count drops to -/// zero, only the count is freed. No other automatic cleanup takes -/// place; this is just a reference count, not a smart pointer. -/// -/// `RefCount` values are created only by [`LifeGuard::new`] and by -/// `Clone`, so every `RefCount` is implicitly tied to some -/// [`LifeGuard`]. -#[derive(Debug)] -struct RefCount(ptr::NonNull); - -unsafe impl Send for RefCount {} -unsafe impl Sync for RefCount {} - -impl RefCount { - const MAX: usize = 1 << 24; - - /// Construct a new `RefCount`, with an initial count of 1. 
- fn new() -> RefCount { - let bx = Box::new(AtomicUsize::new(1)); - Self(unsafe { ptr::NonNull::new_unchecked(Box::into_raw(bx)) }) - } - - fn load(&self) -> usize { - unsafe { self.0.as_ref() }.load(Ordering::Acquire) - } -} - -impl Clone for RefCount { - fn clone(&self) -> Self { - let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel); - assert!(old_size < Self::MAX); - Self(self.0) - } -} - -impl Drop for RefCount { - fn drop(&mut self) { - unsafe { - if self.0.as_ref().fetch_sub(1, Ordering::AcqRel) == 1 { - drop(Box::from_raw(self.0.as_ptr())); - } - } - } -} - -/// Reference count object that tracks multiple references. -/// Unlike `RefCount`, it's manually inc()/dec() called. -#[derive(Debug)] -struct MultiRefCount(AtomicUsize); - -impl MultiRefCount { - fn new() -> Self { - Self(AtomicUsize::new(1)) - } - - fn inc(&self) { - self.0.fetch_add(1, Ordering::AcqRel); - } - - fn dec_and_check_empty(&self) -> bool { - self.0.fetch_sub(1, Ordering::AcqRel) == 1 - } -} - -/// Information needed to decide when it's safe to free some wgpu-core -/// resource. -/// -/// Each type representing a `wgpu-core` resource, like [`Device`], -/// [`Buffer`], etc., contains a `LifeGuard` which indicates whether -/// it is safe to free. -/// -/// A resource may need to be retained for any of several reasons: -/// -/// - The user may hold a reference to it (via a `wgpu::Buffer`, say). -/// -/// - Other resources may depend on it (a texture view's backing -/// texture, for example). -/// -/// - It may be used by commands sent to the GPU that have not yet -/// finished execution. -/// -/// [`Device`]: device::Device -/// [`Buffer`]: resource::Buffer -#[derive(Debug)] -pub struct LifeGuard { - /// `RefCount` for the user's reference to this resource. - /// - /// When the user first creates a `wgpu-core` resource, this `RefCount` is - /// created along with the resource's `LifeGuard`. When the user drops the - /// resource, we swap this out for `None`. Note that the resource may - /// still be held alive by other resources. - /// - /// Any `Stored` value holds a clone of this `RefCount` along with the id - /// of a `T` resource. - ref_count: Option, - - /// The index of the last queue submission in which the resource - /// was used. - /// - /// Each queue submission is fenced and assigned an index number - /// sequentially. Thus, when a queue submission completes, we know any - /// resources used in that submission and any lower-numbered submissions are - /// no longer in use by the GPU. - submission_index: AtomicUsize, - - /// The `label` from the descriptor used to create the resource. - #[cfg(debug_assertions)] - pub(crate) label: String, -} - -impl LifeGuard { - #[allow(unused_variables)] - fn new(label: &str) -> Self { - Self { - ref_count: Some(RefCount::new()), - submission_index: AtomicUsize::new(0), - #[cfg(debug_assertions)] - label: label.to_string(), - } - } - - fn add_ref(&self) -> RefCount { - self.ref_count.clone().unwrap() - } - - /// Record that this resource will be used by the queue submission with the - /// given index. - /// - /// Returns `true` if the resource is still held by the user. 
- fn use_at(&self, submit_index: SubmissionIndex) -> bool { - self.submission_index - .store(submit_index as _, Ordering::Release); - self.ref_count.is_some() - } - - fn life_count(&self) -> SubmissionIndex { - self.submission_index.load(Ordering::Acquire) as _ - } -} - -#[derive(Clone, Debug)] -struct Stored { - value: id::Valid, - ref_count: RefCount, -} - const DOWNLEVEL_WARNING_MESSAGE: &str = "The underlying API or device in use does not \ support enough features to be a fully compliant implementation of WebGPU. A subset of the features can still be used. \ If you are running this program on native and not in a browser and wish to limit the features you use to the supported subset, \ diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 6923be65fb..f3f1133df0 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -1,13 +1,14 @@ use crate::{ binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, command::ColorAttachmentError, - device::{DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, - hub::Resource, - id::{DeviceId, PipelineLayoutId, ShaderModuleId}, - validation, Label, LifeGuard, Stored, + device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, + hal_api::HalApi, + id::{ComputePipelineId, PipelineLayoutId, RenderPipelineId, ShaderModuleId, Valid}, + resource::{Resource, ResourceInfo}, + validation, Label, }; use arrayvec::ArrayVec; -use std::{borrow::Cow, error::Error, fmt, marker::PhantomData, num::NonZeroU32}; +use std::{borrow::Cow, error::Error, fmt, marker::PhantomData, num::NonZeroU32, sync::Arc}; use thiserror::Error; /// Information about buffer bindings, which @@ -40,26 +41,47 @@ pub struct ShaderModuleDescriptor<'a> { } #[derive(Debug)] -pub struct ShaderModule { - pub(crate) raw: A::ShaderModule, - pub(crate) device_id: Stored, +pub struct ShaderModule { + pub(crate) raw: Option>, + pub(crate) device: Arc>, pub(crate) interface: Option, + pub(crate) info: ResourceInfo, #[cfg(debug_assertions)] pub(crate) label: String, } -impl Resource for ShaderModule { +impl Drop for ShaderModule { + fn drop(&mut self) { + #[cfg(feature = "trace")] + if let Some(ref trace) = self.device.trace { + trace + .lock() + .add(trace::Action::DestroyShaderModule(self.info.id())); + } + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_shader_module(raw); + } + } else { + panic!("ShaderModule raw cannot be destroyed because is still in use"); + } + } +} + +impl Resource for ShaderModule { const TYPE: &'static str = "ShaderModule"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { + fn label(&self) -> String { #[cfg(debug_assertions)] - return &self.label; + return self.label.clone(); #[cfg(not(debug_assertions))] - return ""; + return String::new(""); } } @@ -209,19 +231,37 @@ pub enum CreateComputePipelineError { } #[derive(Debug)] -pub struct ComputePipeline { - pub(crate) raw: A::ComputePipeline, - pub(crate) layout_id: Stored, - pub(crate) device_id: Stored, +pub struct ComputePipeline { + pub(crate) raw: Option>, + pub(crate) layout_id: Valid, + pub(crate) device: Arc>, pub(crate) late_sized_buffer_groups: ArrayVec, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, } -impl Resource for ComputePipeline { +impl Drop for ComputePipeline { + fn drop(&mut self) { + let raw = 
self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device + .raw + .as_ref() + .unwrap() + .destroy_compute_pipeline(raw); + } + } else { + panic!("ComputePipeline raw cannot be destroyed because is still in use"); + } + } +} + +impl Resource for ComputePipeline { const TYPE: &'static str = "ComputePipeline"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -410,22 +450,40 @@ impl Default for VertexStep { } #[derive(Debug)] -pub struct RenderPipeline { - pub(crate) raw: A::RenderPipeline, - pub(crate) layout_id: Stored, - pub(crate) device_id: Stored, +pub struct RenderPipeline { + pub(crate) raw: Option>, + pub(crate) layout_id: Valid, + pub(crate) device: Arc>, pub(crate) pass_context: RenderPassContext, pub(crate) flags: PipelineFlags, pub(crate) strip_index_format: Option, pub(crate) vertex_steps: Vec, pub(crate) late_sized_buffer_groups: ArrayVec, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, +} + +impl Drop for RenderPipeline { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device + .raw + .as_ref() + .unwrap() + .destroy_render_pipeline(raw); + } + } else { + panic!("RenderPipeline raw cannot be destroyed because is still in use"); + } + } } -impl Resource for RenderPipeline { +impl Resource for RenderPipeline { const TYPE: &'static str = "RenderPipeline"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 5fe64d449b..38c0a6d6ae 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -9,20 +9,30 @@ When this texture is presented, we remove it from the device tracker as well as extract it from the hub. 
!*/ -use std::borrow::Borrow; +use std::{ + borrow::{Borrow, Cow}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; #[cfg(feature = "trace")] use crate::device::trace::Action; use crate::{ conv, device::{DeviceError, MissingDownlevelFlags}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Input, Token}, + global::Global, + hal_api::HalApi, id::{DeviceId, SurfaceId, TextureId, Valid}, + identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, - resource, track, LifeGuard, Stored, + resource::{self, ResourceInfo}, + track, }; use hal::{Queue as _, Surface as _}; +use parking_lot::RwLock; use thiserror::Error; use wgt::SurfaceStatus as Status; @@ -31,16 +41,16 @@ pub const DESIRED_NUM_FRAMES: u32 = 3; #[derive(Debug)] pub(crate) struct Presentation { - pub(crate) device_id: Stored, + pub(crate) device_id: Valid, pub(crate) config: wgt::SurfaceConfiguration>, #[allow(unused)] pub(crate) num_frames: u32, - pub(crate) acquired_texture: Option>, + pub(crate) acquired_texture: Option>, } impl Presentation { pub(crate) fn backend(&self) -> wgt::Backend { - crate::id::TypedId::unzip(self.device_id.value.0).2 + crate::id::TypedId::unzip(self.device_id.0).2 } } @@ -109,18 +119,17 @@ impl Global { profiling::scope!("SwapChain::get_next_texture"); let hub = A::hub(self); - let mut token = Token::root(); + let fid = hub.textures.prepare(texture_id_in); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - .get_mut(surface_id) + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (device_guard, mut token) = hub.devices.read(&mut token); - let (device, config) = match surface.presentation { - Some(ref present) => { - let device = &device_guard[present.device_id.value]; + let (device, config) = match surface.presentation.lock().as_ref() { + Some(present) => { + let device = hub.devices.get(present.device_id.0).unwrap().clone(); (device, present.config.clone()) } None => return Err(SurfaceError::NotConfigured), @@ -136,7 +145,7 @@ impl Global { #[cfg(not(feature = "trace"))] let _ = device; - let suf = A::get_surface_mut(surface); + let suf = A::get_surface(surface.as_ref()); let (texture_id, status) = match unsafe { suf.unwrap() .raw @@ -145,74 +154,78 @@ impl Global { ))) } { Ok(Some(ast)) => { - let clear_view_desc = hal::TextureViewDescriptor { - label: Some("(wgpu internal) clear surface texture view"), + let texture_desc = wgt::TextureDescriptor { + label: (), + size: wgt::Extent3d { + width: config.width, + height: config.height, + depth_or_array_layers: 1, + }, + sample_count: 1, + mip_level_count: 1, format: config.format, - dimension: wgt::TextureViewDimension::D2, - usage: hal::TextureUses::COLOR_TARGET, - range: wgt::ImageSubresourceRange::default(), + dimension: wgt::TextureDimension::D2, + usage: config.usage, + view_formats: config.view_formats, + }; + let hal_usage = conv::map_texture_usage(config.usage, config.format.into()); + let format_features = wgt::TextureFormatFeatures { + allowed_usages: wgt::TextureUsages::RENDER_ATTACHMENT, + flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, }; let mut clear_views = smallvec::SmallVec::new(); - clear_views.push( - unsafe { - hal::Device::create_texture_view( - &device.raw, - ast.texture.borrow(), - &clear_view_desc, - ) - } - .map_err(DeviceError::from)?, - ); - let present = surface.presentation.as_mut().unwrap(); + let descriptor = 
resource::TextureViewDescriptor { + label: Some(Cow::Borrowed("(wgpu internal) clear surface texture view")), + format: Some(config.format), + dimension: Some(wgt::TextureViewDimension::D2), + range: wgt::ImageSubresourceRange::default(), + }; + let view = device + .create_texture_inner_view( + ast.texture.borrow(), + fid.id(), + &texture_desc, + &hal::TextureUses::COLOR_TARGET, + &format_features, + &descriptor, + ) + .unwrap(); + clear_views.push(Arc::new(view)); + + let mut presentation = surface.presentation.lock(); + let present = presentation.as_mut().unwrap(); let texture = resource::Texture { - inner: resource::TextureInner::Surface { + inner: Some(resource::TextureInner::Surface { raw: ast.texture, parent_id: Valid(surface_id), - has_work: false, - }, - device_id: present.device_id.clone(), - desc: wgt::TextureDescriptor { - label: (), - size: wgt::Extent3d { - width: config.width, - height: config.height, - depth_or_array_layers: 1, - }, - sample_count: 1, - mip_level_count: 1, - format: config.format, - dimension: wgt::TextureDimension::D2, - usage: config.usage, - view_formats: config.view_formats, - }, - hal_usage: conv::map_texture_usage(config.usage, config.format.into()), - format_features: wgt::TextureFormatFeatures { - allowed_usages: wgt::TextureUsages::RENDER_ATTACHMENT, - flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, - }, - initialization_status: TextureInitTracker::new(1, 1), + has_work: AtomicBool::new(false), + }), + device: device.clone(), + desc: texture_desc, + hal_usage, + format_features, + initialization_status: RwLock::new(TextureInitTracker::new(1, 1)), full_range: track::TextureSelector { layers: 0..1, mips: 0..1, }, - life_guard: LifeGuard::new(""), - clear_mode: resource::TextureClearMode::RenderPass { + info: ResourceInfo::new(""), + clear_mode: RwLock::new(resource::TextureClearMode::RenderPass { clear_views, is_color: true, - }, + }), }; - let ref_count = texture.life_guard.add_ref(); - let id = fid.assign(texture, &mut token); + let (id, resource) = fid.assign(texture); { // register it in the device tracker as uninitialized let mut trackers = device.trackers.lock(); trackers.textures.insert_single( id.0, - ref_count.clone(), + resource, hal::TextureUses::UNINITIALIZED, ); } @@ -220,10 +233,7 @@ impl Global { if present.acquired_texture.is_some() { return Err(SurfaceError::AlreadyAcquired); } - present.acquired_texture = Some(Stored { - value: id, - ref_count, - }); + present.acquired_texture = Some(id); let status = if ast.suboptimal { Status::Suboptimal @@ -259,20 +269,19 @@ impl Global { profiling::scope!("SwapChain::present"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - .get_mut(surface_id) + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let present = match surface.presentation { - Some(ref mut present) => present, + let mut presentation = surface.presentation.lock(); + let present = match presentation.as_mut() { + Some(present) => present, None => return Err(SurfaceError::NotConfigured), }; - let device = &mut device_guard[present.device_id.value]; + let device = hub.devices.get(present.device_id.0).unwrap().clone(); #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { @@ -289,41 +298,50 @@ impl Global { // and now we are moving it away. 
log::debug!( "Removing swapchain texture {:?} from the device tracker", - texture_id.value + texture_id ); - device.trackers.lock().textures.remove(texture_id.value); + device.trackers.lock().textures.remove(texture_id); - let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token); + let texture = hub.textures.unregister(texture_id.0); if let Some(texture) = texture { - if let resource::TextureClearMode::RenderPass { clear_views, .. } = - texture.clear_mode - { - for clear_view in clear_views { - unsafe { - hal::Device::destroy_texture_view(&device.raw, clear_view); - } + if let Ok(mut texture) = Arc::try_unwrap(texture) { + let mut clear_mode = texture.clear_mode.write(); + if let resource::TextureClearMode::RenderPass { clear_views, .. } = + &mut *clear_mode + { + clear_views.clear(); } - } - let suf = A::get_surface_mut(surface); - match texture.inner { - resource::TextureInner::Surface { - raw, - parent_id, - has_work, - } => { - if surface_id != parent_id.0 { - log::error!("Presented frame is from a different surface"); - Err(hal::SurfaceError::Lost) - } else if !has_work { - log::error!("No work has been submitted for this frame"); - unsafe { suf.unwrap().raw.discard_texture(raw) }; - Err(hal::SurfaceError::Outdated) - } else { - unsafe { device.queue.present(&mut suf.unwrap().raw, raw) } + let suf = A::get_surface(&surface); + match texture.inner.take().unwrap() { + resource::TextureInner::Surface { + raw, + parent_id, + has_work, + } => { + if surface_id != parent_id.0 { + log::error!("Presented frame is from a different surface"); + Err(hal::SurfaceError::Lost) + } else if !has_work.load(Ordering::Relaxed) { + log::error!("No work has been submitted for this frame"); + unsafe { suf.unwrap().raw.discard_texture(raw) }; + Err(hal::SurfaceError::Outdated) + } else { + unsafe { + device + .queue + .as_ref() + .unwrap() + .present(&suf.unwrap().raw, raw) + } + } } + resource::TextureInner::Native { .. } => unreachable!(), } - resource::TextureInner::Native { .. } => unreachable!(), + } else { + Err(hal::SurfaceError::Other( + "Surface cannot be destroyed because is still in use", + )) } } else { Err(hal::SurfaceError::Outdated) //TODO? @@ -353,20 +371,18 @@ impl Global { profiling::scope!("SwapChain::discard"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - .get_mut(surface_id) + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - let present = match surface.presentation { - Some(ref mut present) => present, + let mut presentation = surface.presentation.lock(); + let present = match presentation.as_mut() { + Some(present) => present, None => return Err(SurfaceError::NotConfigured), }; - let device = &mut device_guard[present.device_id.value]; + let device = hub.devices.get(present.device_id.0).unwrap().clone(); #[cfg(feature = "trace")] if let Some(ref trace) = device.trace { @@ -381,24 +397,28 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. 
- device.trackers.lock().textures.remove(texture_id.value); + device.trackers.lock().textures.remove(texture_id); - let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token); + let texture = hub.textures.unregister(texture_id.0); if let Some(texture) = texture { - let suf = A::get_surface_mut(surface); - match texture.inner { - resource::TextureInner::Surface { - raw, - parent_id, - has_work: _, - } => { - if surface_id == parent_id.0 { - unsafe { suf.unwrap().raw.discard_texture(raw) }; - } else { - log::warn!("Surface texture is outdated"); + if let Ok(mut texture) = Arc::try_unwrap(texture) { + let suf = A::get_surface(&surface); + match texture.inner.take().unwrap() { + resource::TextureInner::Surface { + raw, + parent_id, + has_work: _, + } => { + if surface_id == parent_id.0 { + unsafe { suf.unwrap().raw.discard_texture(raw) }; + } else { + log::warn!("Surface texture is outdated"); + } } + resource::TextureInner::Native { .. } => unreachable!(), } - resource::TextureInner::Native { .. } => unreachable!(), + } else { + return Err(SurfaceError::StillReferenced); } } } diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs new file mode 100644 index 0000000000..6d5c450050 --- /dev/null +++ b/wgpu-core/src/registry.rs @@ -0,0 +1,132 @@ +use std::sync::Arc; + +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use wgt::Backend; + +use crate::{ + id, + identity::{IdentityHandler, IdentityHandlerFactory}, + resource::Resource, + storage::{InvalidId, Storage, StorageReport}, +}; + +#[derive(Debug)] +pub struct Registry, F: IdentityHandlerFactory> { + identity: F::Filter, + storage: RwLock>, + backend: Backend, +} + +impl, F: IdentityHandlerFactory> Registry { + pub(crate) fn new(backend: Backend, factory: &F) -> Self { + Self { + identity: factory.spawn(), + storage: RwLock::new(Storage::new()), + backend, + } + } + + pub(crate) fn without_backend(factory: &F, kind: &'static str) -> Self { + Self { + identity: factory.spawn(), + storage: RwLock::new(Storage::from_kind(kind)), + backend: Backend::Empty, + } + } +} + +#[must_use] +pub(crate) struct FutureId<'a, I: id::TypedId, T: Resource> { + id: I, + data: &'a RwLock>, +} + +impl> FutureId<'_, I, T> { + pub fn id(&self) -> I { + self.id + } + + pub fn into_id(self) -> I { + self.id + } + + pub fn assign(self, value: T) -> (id::Valid, Arc) { + value.info().set_id(self.id); + self.data.write().insert(self.id, value); + ( + id::Valid(self.id), + self.data.read().get(self.id).unwrap().clone(), + ) + } + + pub fn assign_error(self, label: &str) -> I { + self.data.write().insert_error(self.id, label); + self.id + } +} + +impl, F: IdentityHandlerFactory> Registry { + pub(crate) fn prepare( + &self, + id_in: >::Input, + ) -> FutureId { + FutureId { + id: self.identity.process(id_in, self.backend), + data: &self.storage, + } + } + pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { + self.storage + .read() + .try_get(id) + .map(|o| o.map(|v| v.clone())) + } + pub(crate) fn get(&self, id: I) -> Result, InvalidId> { + self.storage.read().get(id).map(|v| v.clone()) + } + pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { + self.storage.read() + } + pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage> { + self.storage.write() + } + pub fn unregister_locked(&self, id: I, storage: &mut Storage) -> Option> { + let value = storage.remove(id); + //Note: careful about the order here! 
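+        // (Likely rationale: the entry is removed from storage *before* the
+        // id is freed, so an id re-issued by the identity manager cannot
+        // collide with a stale entry still sitting in storage.)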
+ self.identity.free(id); + //Returning None is legal if it's an error ID + value + } + pub(crate) fn unregister(&self, id: I) -> Option> { + let value = self.storage.write().remove(id); + //Note: careful about the order here! + self.identity.free(id); + //Returning None is legal if it's an error ID + value + } + + pub fn label_for_resource(&self, id: I) -> String { + let guard = self.storage.read(); + + let type_name = guard.kind(); + match guard.get(id) { + Ok(res) => { + let label = res.label(); + if label.is_empty() { + format!("<{}-{:?}>", type_name, id.unzip()) + } else { + label.to_string() + } + } + Err(_) => format!( + "", + type_name, + guard.label_for_invalid_id(id) + ), + } + } + + pub(crate) fn generate_report(&self) -> StorageReport { + self.storage.read().generate_report() + } +} diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 9dbf1b3357..c7f2006731 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -1,17 +1,117 @@ use crate::{ - device::{DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, - hub::{Global, GlobalIdentityHandlerFactory, HalApi, Resource, Token}, - id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid}, + device::{Device, DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, + global::Global, + hal_api::HalApi, + id::{ + AdapterId, BufferId, DeviceId, QuerySetId, SamplerId, StagingBufferId, SurfaceId, + TextureId, TextureViewId, TypedId, Valid, + }, + identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTracker, TextureInitTracker}, track::TextureSelector, validation::MissingBufferUsageError, - Label, LifeGuard, RefCount, Stored, + Label, SubmissionIndex, }; +use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; use thiserror::Error; -use std::{borrow::Borrow, ops::Range, ptr::NonNull}; +use std::{ + borrow::Borrow, + fmt::Debug, + ops::Range, + ptr::NonNull, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; + +/// Information about the wgpu-core resource. +/// +/// Each type representing a `wgpu-core` resource, like [`Device`], +/// [`Buffer`], etc., contains a `ResourceInfo` which contains +/// its latest submission index and label. +/// +/// A resource may need to be retained for any of several reasons: +/// and any lifetime logic will be handled by Arc refcount +/// +/// - The user may hold a reference to it (via a `wgpu::Buffer`, say). +/// +/// - Other resources may depend on it (a texture view's backing +/// texture, for example). +/// +/// - It may be used by commands sent to the GPU that have not yet +/// finished execution. +/// +/// [`Device`]: device::Device +/// [`Buffer`]: resource::Buffer +#[derive(Debug)] +pub struct ResourceInfo { + id: RwLock>>, + /// The index of the last queue submission in which the resource + /// was used. + /// + /// Each queue submission is fenced and assigned an index number + /// sequentially. Thus, when a queue submission completes, we know any + /// resources used in that submission and any lower-numbered submissions are + /// no longer in use by the GPU. + submission_index: AtomicUsize, + + /// The `label` from the descriptor used to create the resource. 
+ #[cfg(debug_assertions)] + pub(crate) label: String, +} + +impl ResourceInfo { + #[allow(unused_variables)] + pub(crate) fn new(label: &str) -> Self { + Self { + id: RwLock::new(None), + submission_index: AtomicUsize::new(0), + #[cfg(debug_assertions)] + label: label.to_string(), + } + } + + pub(crate) fn id(&self) -> Valid { + self.id.read().unwrap() + } + + pub(crate) fn set_id(&self, id: Id) { + let mut value = self.id.write(); + *value = Some(Valid(id)); + } + + /// Record that this resource will be used by the queue submission with the + /// given index. + pub(crate) fn use_at(&self, submit_index: SubmissionIndex) { + self.submission_index + .store(submit_index as _, Ordering::Release); + } + + pub(crate) fn submission_index(&self) -> SubmissionIndex { + self.submission_index.load(Ordering::Acquire) as _ + } +} + +pub trait Resource { + const TYPE: &'static str; + fn info(&self) -> &ResourceInfo; + fn label(&self) -> String { + #[cfg(debug_assertions)] + return self.info().label.clone(); + #[cfg(not(debug_assertions))] + return String::new(); + } + fn ref_count(self: &Arc) -> usize { + Arc::strong_count(self) + } + fn is_unique(self: &Arc) -> bool { + self.ref_count() == 1 + } +} /// The status code provided to the buffer mapping callback. /// @@ -46,15 +146,16 @@ pub enum BufferMapAsyncStatus { InvalidUsageFlags, } -pub(crate) enum BufferMapState { +#[derive(Debug)] +pub(crate) enum BufferMapState { /// Mapped at creation. Init { ptr: NonNull, - stage_buffer: A::Buffer, + stage_buffer: Arc>, needs_flush: bool, }, /// Waiting for GPU to be done before mapping - Waiting(BufferPendingMapping), + Waiting(BufferPendingMapping), /// Mapped Active { ptr: NonNull, @@ -65,8 +166,8 @@ pub(crate) enum BufferMapState { Idle, } -unsafe impl Send for BufferMapState {} -unsafe impl Sync for BufferMapState {} +unsafe impl Send for BufferMapState {} +unsafe impl Sync for BufferMapState {} #[repr(C)] pub struct BufferMapCallbackC { @@ -76,6 +177,7 @@ pub struct BufferMapCallbackC { unsafe impl Send for BufferMapCallbackC {} +#[derive(Debug)] pub struct BufferMapCallback { // We wrap this so creating the enum in the C variant can be unsafe, // allowing our call function to be safe. 
@@ -91,6 +193,15 @@ enum BufferMapCallbackInner { }, } +impl Debug for BufferMapCallbackInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BufferMapCallbackInner::Rust { callback: _ } => f.debug_struct("Rust").finish(), + BufferMapCallbackInner::C { inner: _ } => f.debug_struct("C").finish(), + } + } +} + impl BufferMapCallback { pub fn from_rust(callback: Box) -> Self { Self { @@ -161,6 +272,7 @@ impl Drop for BufferMapCallback { } } +#[derive(Debug)] pub struct BufferMapOperation { pub host: HostMap, pub callback: BufferMapCallback, @@ -214,24 +326,42 @@ pub enum BufferAccessError { } pub type BufferAccessResult = Result<(), BufferAccessError>; -pub(crate) struct BufferPendingMapping { + +#[derive(Debug)] +pub(crate) struct BufferPendingMapping { pub range: Range, pub op: BufferMapOperation, // hold the parent alive while the mapping is active - pub _parent_ref_count: RefCount, + pub _parent_buffer: Arc>, } pub type BufferDescriptor<'a> = wgt::BufferDescriptor>; -pub struct Buffer { - pub(crate) raw: Option, - pub(crate) device_id: Stored, +#[derive(Debug)] +pub struct Buffer { + pub(crate) raw: Option>, + pub(crate) device: Arc>, pub(crate) usage: wgt::BufferUsages, pub(crate) size: wgt::BufferAddress, - pub(crate) initialization_status: BufferInitTracker, - pub(crate) sync_mapped_writes: Option, - pub(crate) life_guard: LifeGuard, - pub(crate) map_state: BufferMapState, + pub(crate) initialization_status: RwLock, + pub(crate) sync_mapped_writes: Mutex>, + pub(crate) info: ResourceInfo, + pub(crate) map_state: Mutex>, +} + +impl Drop for Buffer { + fn drop(&mut self) { + if let Some(buffer) = self.raw.take() { + if let Ok(raw) = Arc::try_unwrap(buffer) { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_buffer(raw); + } + } else { + panic!("Buffer cannot be destroyed because is still in use"); + } + } + } } #[derive(Clone, Debug, Error)] @@ -252,11 +382,11 @@ pub enum CreateBufferError { MissingDownlevelFlags(#[from] MissingDownlevelFlags), } -impl Resource for Buffer { +impl Resource for Buffer { const TYPE: &'static str = "Buffer"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -279,42 +409,44 @@ impl Resource for Buffer { /// [`queue_write_staging_buffer`]: Global::queue_write_staging_buffer /// [`queue_write_texture`]: Global::queue_write_texture /// [`Device::pending_writes`]: crate::device::Device -pub struct StagingBuffer { - pub(crate) raw: A::Buffer, +#[derive(Debug)] +pub struct StagingBuffer { + pub(crate) raw: Arc, pub(crate) size: wgt::BufferAddress, pub(crate) is_coherent: bool, + pub(crate) info: ResourceInfo, } -impl Resource for StagingBuffer { +impl Resource for StagingBuffer { const TYPE: &'static str = "StagingBuffer"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { - "" + fn label(&self) -> String { + String::from("") } } pub type TextureDescriptor<'a> = wgt::TextureDescriptor, Vec>; #[derive(Debug)] -pub(crate) enum TextureInner { +pub(crate) enum TextureInner { Native { - raw: Option, + raw: Option>, }, Surface { raw: A::SurfaceTexture, parent_id: Valid, - has_work: bool, + has_work: AtomicBool, }, } -impl TextureInner { +impl TextureInner { pub fn as_raw(&self) -> Option<&A::Texture> { match *self { - Self::Native { raw: Some(ref tex) } => Some(tex), + Self::Native { raw: Some(ref tex) } => Some(&tex), Self::Native { raw: None } => None, 
Self::Surface { ref raw, .. } => Some(raw.borrow()), } @@ -322,11 +454,11 @@ impl TextureInner { } #[derive(Debug)] -pub enum TextureClearMode { +pub enum TextureClearMode { BufferCopy, // View for clear via RenderPass for every subsurface (mip/layer/slice) RenderPass { - clear_views: SmallVec<[A::TextureView; 1]>, + clear_views: SmallVec<[Arc>; 1]>, is_color: bool, }, // Texture can't be cleared, attempting to do so will cause panic. @@ -335,21 +467,48 @@ pub enum TextureClearMode { } #[derive(Debug)] -pub struct Texture { - pub(crate) inner: TextureInner, - pub(crate) device_id: Stored, +pub struct Texture { + pub(crate) inner: Option>, + pub(crate) device: Arc>, pub(crate) desc: wgt::TextureDescriptor<(), Vec>, pub(crate) hal_usage: hal::TextureUses, pub(crate) format_features: wgt::TextureFormatFeatures, - pub(crate) initialization_status: TextureInitTracker, + pub(crate) initialization_status: RwLock, pub(crate) full_range: TextureSelector, - pub(crate) life_guard: LifeGuard, - pub(crate) clear_mode: TextureClearMode, + pub(crate) info: ResourceInfo, + pub(crate) clear_mode: RwLock>, +} + +impl Drop for Texture { + fn drop(&mut self) { + use hal::Device; + let mut clear_mode = self.clear_mode.write(); + if let TextureClearMode::RenderPass { clear_views, .. } = &mut *clear_mode { + clear_views.clear(); + } + if let Some(inner) = self.inner.take() { + if let TextureInner::Native { raw: Some(raw) } = inner { + let raw = Arc::try_unwrap(raw); + unsafe { + self.device + .raw + .as_ref() + .unwrap() + .destroy_texture(raw.unwrap()); + } + } + } + } } -impl Texture { - pub(crate) fn get_clear_view(&self, mip_level: u32, depth_or_layer: u32) -> &A::TextureView { - match self.clear_mode { +impl Texture { + pub(crate) fn get_clear_view<'a>( + clear_mode: &'a TextureClearMode, + desc: &'a wgt::TextureDescriptor<(), Vec>, + mip_level: u32, + depth_or_layer: u32, + ) -> &'a A::TextureView { + match clear_mode { TextureClearMode::BufferCopy => { panic!("Given texture is cleared with buffer copies, not render passes") } @@ -359,14 +518,14 @@ impl Texture { TextureClearMode::RenderPass { ref clear_views, .. 
} => { - let index = if self.desc.dimension == wgt::TextureDimension::D3 { + let index = if desc.dimension == wgt::TextureDimension::D3 { (0..mip_level).fold(0, |acc, mip| { - acc + (self.desc.size.depth_or_array_layers >> mip).max(1) + acc + (desc.size.depth_or_array_layers >> mip).max(1) }) } else { - mip_level * self.desc.size.depth_or_array_layers + mip_level * desc.size.depth_or_array_layers } + depth_or_layer; - &clear_views[index as usize] + clear_views[index as usize].raw.as_ref().unwrap() } } } @@ -384,10 +543,10 @@ impl Global { profiling::scope!("Texture::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let (guard, _) = hub.textures.read(&mut token); - let texture = guard.try_get(id).ok().flatten(); - let hal_texture = texture.and_then(|tex| tex.inner.as_raw()); + let texture = { hub.textures.try_get(id).ok().flatten() }; + let hal_texture = texture + .as_ref() + .and_then(|tex| tex.inner.as_ref().unwrap().as_raw()); hal_texture_callback(hal_texture); } @@ -403,11 +562,8 @@ impl Global { profiling::scope!("Adapter::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - - let (guard, _) = hub.adapters.read(&mut token); - let adapter = guard.try_get(id).ok().flatten(); - let hal_adapter = adapter.map(|adapter| &adapter.raw.adapter); + let adapter = hub.adapters.try_get(id).ok().flatten(); + let hal_adapter = adapter.as_ref().map(|adapter| &adapter.raw.adapter); hal_adapter_callback(hal_adapter) } @@ -423,29 +579,26 @@ impl Global { profiling::scope!("Device::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let (guard, _) = hub.devices.read(&mut token); - let device = guard.try_get(id).ok().flatten(); - let hal_device = device.map(|device| &device.raw); + let device = hub.devices.try_get(id).ok().flatten(); + let hal_device = device.as_ref().map(|device| device.raw.as_ref().unwrap()); hal_device_callback(hal_device) } /// # Safety /// - The raw surface handle must not be manually destroyed - pub unsafe fn surface_as_hal_mut) -> R, R>( + pub unsafe fn surface_as_hal) -> R, R>( &self, id: SurfaceId, hal_surface_callback: F, ) -> R { - profiling::scope!("Surface::as_hal_mut"); + profiling::scope!("Surface::as_hal"); - let mut token = Token::root(); - let (mut guard, _) = self.surfaces.write(&mut token); - let surface = guard.get_mut(id).ok(); + let surface = self.surfaces.get(id).ok(); let hal_surface = surface - .and_then(|surface| A::get_surface_mut(surface)) - .map(|surface| &mut surface.raw); + .as_ref() + .and_then(|surface| A::get_surface(&surface)) + .map(|surface| &*surface.raw); hal_surface_callback(hal_surface) } @@ -525,15 +678,15 @@ pub enum CreateTextureError { MissingDownlevelFlags(#[from] MissingDownlevelFlags), } -impl Resource for Texture { +impl Resource for Texture { const TYPE: &'static str = "Texture"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } -impl Borrow for Texture { +impl Borrow for Texture { fn borrow(&self) -> &TextureSelector { &self.full_range } @@ -593,12 +746,12 @@ pub enum TextureViewNotRenderableReason { } #[derive(Debug)] -pub struct TextureView { - pub(crate) raw: A::TextureView, +pub struct TextureView { + pub(crate) raw: Option>, // The parent's refcount is held alive, but the parent may still be deleted // if it's a surface texture. TODO: make this cleaner. - pub(crate) parent_id: Stored, - pub(crate) device_id: Stored, + pub(crate) parent_id: Valid, + pub(crate) device: Arc>, //TODO: store device_id for quick access? 
pub(crate) desc: HalTextureViewDescriptor, pub(crate) format_features: wgt::TextureFormatFeatures, @@ -606,7 +759,21 @@ pub struct TextureView { pub(crate) render_extent: Result, pub(crate) samples: u32, pub(crate) selector: TextureSelector, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, +} + +impl Drop for TextureView { + fn drop(&mut self) { + use hal::Device; + let raw = Arc::try_unwrap(self.raw.take().unwrap()); + unsafe { + self.device + .raw + .as_ref() + .unwrap() + .destroy_texture_view(raw.unwrap()); + } + } } #[derive(Clone, Debug, Error)] @@ -658,11 +825,11 @@ pub enum CreateTextureViewError { #[derive(Clone, Debug, Error)] pub enum TextureViewDestroyError {} -impl Resource for TextureView { +impl Resource for TextureView { const TYPE: &'static str = "TextureView"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -697,16 +864,30 @@ pub struct SamplerDescriptor<'a> { } #[derive(Debug)] -pub struct Sampler { - pub(crate) raw: A::Sampler, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, +pub struct Sampler { + pub(crate) raw: Option>, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, /// `true` if this is a comparison sampler pub(crate) comparison: bool, /// `true` if this is a filtering sampler pub(crate) filtering: bool, } +impl Drop for Sampler { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_sampler(raw); + } + } else { + panic!("Sampler raw cannot be destroyed because is still in use"); + } + } +} + #[derive(Copy, Clone)] pub enum SamplerFilterErrorType { MagFilter, @@ -714,7 +895,7 @@ pub enum SamplerFilterErrorType { MipmapFilter, } -impl std::fmt::Debug for SamplerFilterErrorType { +impl Debug for SamplerFilterErrorType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { SamplerFilterErrorType::MagFilter => write!(f, "magFilter"), @@ -750,11 +931,11 @@ pub enum CreateSamplerError { MissingFeatures(#[from] MissingFeatures), } -impl Resource for Sampler { +impl Resource for Sampler { const TYPE: &'static str = "Sampler"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } @@ -773,18 +954,32 @@ pub enum CreateQuerySetError { pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor>; #[derive(Debug)] -pub struct QuerySet { - pub(crate) raw: A::QuerySet, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, +pub struct QuerySet { + pub(crate) raw: Option>, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, pub(crate) desc: wgt::QuerySetDescriptor<()>, } -impl Resource for QuerySet { +impl Drop for QuerySet { + fn drop(&mut self) { + let raw = self.raw.take().unwrap(); + if let Ok(raw) = Arc::try_unwrap(raw) { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_query_set(raw); + } + } else { + panic!("QuerySet raw cannot be destroyed because is still in use"); + } + } +} + +impl Resource for QuerySet { const TYPE: &'static str = "QuerySet"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn info(&self) -> &ResourceInfo { + &self.info } } diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs new file mode 100644 index 0000000000..70cb30b5c5 --- /dev/null +++ b/wgpu-core/src/storage.rs @@ -0,0 +1,252 @@ +use std::{marker::PhantomData, mem, ops, sync::Arc}; + +use wgt::Backend; + 
+use crate::{id, resource::Resource, Epoch, Index}; + +/// An entry in a `Storage::map` table. +#[derive(Debug)] +pub(crate) enum Element { + /// There are no live ids with this index. + Vacant, + + /// There is one live id with this index, allocated at the given + /// epoch. + Occupied(Arc, Epoch), + + /// Like `Occupied`, but an error occurred when creating the + /// resource. + /// + /// The given `String` is the resource's descriptor label. + Error(Epoch, String), +} + +#[derive(Clone, Debug, Default)] +pub struct StorageReport { + pub num_occupied: usize, + pub num_vacant: usize, + pub num_error: usize, + pub element_size: usize, +} + +impl StorageReport { + pub fn is_empty(&self) -> bool { + self.num_occupied + self.num_vacant + self.num_error == 0 + } +} + +#[derive(Clone, Debug)] +pub(crate) struct InvalidId; + +/// A table of `T` values indexed by the id type `I`. +/// +/// The table is represented as a vector indexed by the ids' index +/// values, so you should use an id allocator like `IdentityManager` +/// that keeps the index values dense and close to zero. +#[derive(Debug)] +pub struct Storage { + pub(crate) map: Vec>, + kind: &'static str, + _phantom: PhantomData, +} + +impl ops::Index> for Storage { + type Output = Arc; + fn index(&self, id: id::Valid) -> &Arc { + self.get(id.0).unwrap() + } +} +impl Storage +where + T: Resource, +{ + pub(crate) fn new() -> Self { + Self { + map: Vec::new(), + kind: T::TYPE, + _phantom: PhantomData, + } + } +} + +impl Storage { + pub(crate) fn from_kind(kind: &'static str) -> Self { + Self { + map: Vec::new(), + kind, + _phantom: PhantomData, + } + } + pub(crate) fn contains(&self, id: I) -> bool { + let (index, epoch, _) = id.unzip(); + match self.map.get(index as usize) { + Some(&Element::Vacant) => false, + Some(&Element::Occupied(_, storage_epoch) | &Element::Error(storage_epoch, _)) => { + storage_epoch == epoch + } + None => false, + } + } + + /// Attempts to get a reference to an item behind a potentially invalid ID. + /// + /// Returns [`None`] if there is an epoch mismatch, or the entry is empty. + /// + /// This function is primarily intended for the `as_hal` family of functions + /// where you may need to fallibly get a object backed by an id that could + /// be in a different hub. + pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch), + Some(&Element::Vacant) => return Ok(None), + Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + None => return Err(InvalidId), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + /// Get refcount of an item with specified ID + /// And return true if it's 1 or false otherwise + pub(crate) fn is_unique(&self, id: I) -> Result { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (Ok(Arc::strong_count(v) == 1), epoch), + Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + None => return Err(InvalidId), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + /// Get a reference to an item behind a potentially invalid ID. + /// Panics if there is an epoch mismatch, or the entry is empty. 
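`Storage` keys each slot by the id's index and guards it with the id's epoch, so a stale id whose index has been reused is rejected instead of silently aliasing the new resource. A simplified sketch of that index-plus-epoch check; `DemoStorage` and `DemoId` are made-up stand-ins, and real ids also pack a backend and are wrapped in `Valid` once checked.

use std::sync::Arc;

type Index = u32;
type Epoch = u32;

// Simplified stand-in for an id: an index plus the epoch it was allocated at.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct DemoId {
    index: Index,
    epoch: Epoch,
}

#[allow(dead_code)]
enum Element<T> {
    Vacant,
    Occupied(Arc<T>, Epoch),
    Error(Epoch, String),
}

struct DemoStorage<T> {
    map: Vec<Element<T>>,
}

impl<T> DemoStorage<T> {
    fn new() -> Self {
        Self { map: Vec::new() }
    }

    fn insert(&mut self, id: DemoId, value: T) {
        let index = id.index as usize;
        if index >= self.map.len() {
            self.map.resize_with(index + 1, || Element::Vacant);
        }
        self.map[index] = Element::Occupied(Arc::new(value), id.epoch);
    }

    // The epoch check: an id is only valid if its epoch matches the epoch
    // stored alongside the slot it points at.
    fn get(&self, id: DemoId) -> Option<&Arc<T>> {
        match self.map.get(id.index as usize) {
            Some(Element::Occupied(value, epoch)) if *epoch == id.epoch => Some(value),
            _ => None,
        }
    }
}

fn main() {
    let mut storage = DemoStorage::new();

    let id_v1 = DemoId { index: 0, epoch: 1 };
    storage.insert(id_v1, "buffer contents");
    assert!(storage.get(id_v1).is_some());

    // Reusing the same index at a later epoch invalidates the old id.
    let id_v2 = DemoId { index: 0, epoch: 2 };
    storage.insert(id_v2, "new buffer in the same slot");
    assert!(storage.get(id_v1).is_none());
    assert!(storage.get(id_v2).is_some());
}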
+ pub(crate) fn get(&self, id: I) -> Result<&Arc, InvalidId> { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), + Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), + None => return Err(InvalidId), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{}] is no longer alive", + self.kind, index + ); + result + } + + pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &Arc { + match self.map[id as usize] { + Element::Occupied(ref v, _) => v, + Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), + Element::Error(_, _) => panic!(""), + } + } + + pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { + let (index, _, _) = id.unzip(); + match self.map.get(index as usize) { + Some(&Element::Error(_, ref label)) => label, + _ => "", + } + } + + fn insert_impl(&mut self, index: usize, element: Element) { + if index >= self.map.len() { + self.map.resize_with(index + 1, || Element::Vacant); + } + match std::mem::replace(&mut self.map[index], element) { + Element::Vacant => {} + _ => panic!("Index {index:?} is already occupied"), + } + } + + pub(crate) fn insert(&mut self, id: I, value: T) { + let (index, epoch, _) = id.unzip(); + self.insert_impl(index as usize, Element::Occupied(Arc::new(value), epoch)) + } + + pub(crate) fn insert_error(&mut self, id: I, label: &str) { + let (index, epoch, _) = id.unzip(); + self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) + } + + pub(crate) fn force_replace(&mut self, id: I, value: T) { + let (index, epoch, _) = id.unzip(); + self.map[index as usize] = Element::Occupied(Arc::new(value), epoch); + } + + pub(crate) fn remove(&mut self, id: I) -> Option> { + let (index, epoch, _) = id.unzip(); + match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { + Element::Occupied(value, storage_epoch) => { + assert_eq!(epoch, storage_epoch); + Some(value) + } + Element::Error(..) => None, + Element::Vacant => panic!("Cannot remove a vacant resource"), + } + } + + // Prevents panic on out of range access, allows Vacant elements. + pub(crate) fn _try_remove(&mut self, id: I) -> Option> { + let (index, epoch, _) = id.unzip(); + if index as usize >= self.map.len() { + None + } else if let Element::Occupied(value, storage_epoch) = + std::mem::replace(&mut self.map[index as usize], Element::Vacant) + { + assert_eq!(epoch, storage_epoch); + Some(value) + } else { + None + } + } + + pub(crate) fn iter(&self, backend: Backend) -> impl Iterator)> { + self.map + .iter() + .enumerate() + .filter_map(move |(index, x)| match *x { + Element::Occupied(ref value, storage_epoch) => { + Some((I::zip(index as Index, storage_epoch, backend), value)) + } + _ => None, + }) + } + + pub(crate) fn kind(&self) -> &str { + self.kind + } + + pub(crate) fn len(&self) -> usize { + self.map.len() + } + + pub(crate) fn generate_report(&self) -> StorageReport { + let mut report = StorageReport { + element_size: mem::size_of::(), + ..Default::default() + }; + for element in self.map.iter() { + match *element { + Element::Occupied(..) => report.num_occupied += 1, + Element::Vacant => report.num_vacant += 1, + Element::Error(..) 
=> report.num_error += 1, + } + } + report + } +} diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index b7682968b2..2022fa0015 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -5,18 +5,18 @@ * one subresource, they have no selector. !*/ -use std::{borrow::Cow, marker::PhantomData, vec::Drain}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc, vec::Drain}; use super::PendingTransition; use crate::{ - hub, + hal_api::HalApi, id::{BufferId, TypedId, Valid}, resource::Buffer, + storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, - LifeGuard, RefCount, }; use hal::BufferUses; use wgt::{strict_assert, strict_assert_eq}; @@ -41,12 +41,13 @@ impl ResourceUses for BufferUses { } /// Stores all the buffers that a bind group stores. -pub(crate) struct BufferBindGroupState { - buffers: Vec<(Valid, RefCount, BufferUses)>, +#[derive(Debug)] +pub(crate) struct BufferBindGroupState { + buffers: Vec<(Valid, Arc>, BufferUses)>, _phantom: PhantomData, } -impl BufferBindGroupState { +impl BufferBindGroupState { pub fn new() -> Self { Self { buffers: Vec::new(), @@ -65,35 +66,39 @@ impl BufferBindGroupState { } /// Returns a list of all buffers tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { + pub fn used_ids(&self) -> impl Iterator> + '_ { self.buffers.iter().map(|&(id, _, _)| id) } + /// Returns a list of all buffers tracked. May contain duplicates. + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.buffers.iter().map(|(_, buffer, _)| buffer) + } + /// Adds the given resource with the given state. pub fn add_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, ) -> Option<&'a Buffer> { let buffer = storage.get(id).ok()?; - self.buffers - .push((Valid(id), buffer.life_guard.add_ref(), state)); + self.buffers.push((Valid(id), buffer.clone(), state)); - Some(buffer) + Some(&buffer) } } /// Stores all buffer state within a single usage scope. #[derive(Debug)] -pub(crate) struct BufferUsageScope { +pub(crate) struct BufferUsageScope { state: Vec, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, } -impl BufferUsageScope { +impl BufferUsageScope { pub fn new() -> Self { Self { state: Vec::new(), @@ -124,8 +129,8 @@ impl BufferUsageScope { } /// Returns a list of all buffers tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } /// Merge the list of buffer states in the given bind group into this usage scope. @@ -144,13 +149,12 @@ impl BufferUsageScope { &mut self, bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref ref_count, state) in &bind_group.buffers { + for &(id, ref resource, state) in &bind_group.buffers { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; unsafe { insert_or_merge( - None, None, &mut self.state, &mut self.metadata, @@ -159,7 +163,7 @@ impl BufferUsageScope { BufferStateProvider::Direct { state }, ResourceMetadataProvider::Direct { epoch, - ref_count: Cow::Borrowed(ref_count), + resource: Cow::Borrowed(resource), }, )? }; @@ -187,7 +191,6 @@ impl BufferUsageScope { unsafe { insert_or_merge( - None, None, &mut self.state, &mut self.metadata, @@ -215,7 +218,7 @@ impl BufferUsageScope { /// the vectors will be extended. 
A call to set_size is not needed. pub fn merge_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a Storage, BufferId>, id: BufferId, new_state: BufferUses, ) -> Result<&'a Buffer, UsageConflict> { @@ -232,31 +235,33 @@ impl BufferUsageScope { unsafe { insert_or_merge( - Some(&buffer.life_guard), None, &mut self.state, &mut self.metadata, index32, index, BufferStateProvider::Direct { state: new_state }, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Resource { + epoch, + resource: buffer.clone(), + }, )?; } - Ok(buffer) + Ok(&buffer) } } /// Stores all buffer state within a command buffer or device. -pub(crate) struct BufferTracker { +pub(crate) struct BufferTracker { start: Vec, end: Vec, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, temp: Vec>, } -impl BufferTracker { +impl BufferTracker { pub fn new() -> Self { Self { start: Vec::new(), @@ -293,8 +298,8 @@ impl BufferTracker { } /// Returns a list of all buffers tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } /// Drains all currently pending transitions. @@ -308,7 +313,12 @@ impl BufferTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: Valid, ref_count: RefCount, state: BufferUses) { + pub fn insert_single( + &mut self, + id: Valid, + resource: Arc>, + state: BufferUses, + ) { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; @@ -324,7 +334,6 @@ impl BufferTracker { } insert( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -333,7 +342,7 @@ impl BufferTracker { None, ResourceMetadataProvider::Direct { epoch, - ref_count: Cow::Owned(ref_count), + resource: Cow::Owned(resource.clone()), }, ) } @@ -348,11 +357,11 @@ impl BufferTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_single<'a>( &mut self, - storage: &'a hub::Storage, BufferId>, + storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, ) -> Option<(&'a Buffer, Option>)> { - let value = storage.get(id).ok()?; + let buffer = storage.get(id).ok()?; let (index32, epoch, _) = id.unzip(); let index = index32 as usize; @@ -363,7 +372,6 @@ impl BufferTracker { unsafe { insert_or_barrier_update( - Some(&value.life_guard), Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -371,14 +379,17 @@ impl BufferTracker { index, BufferStateProvider::Direct { state }, None, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Resource { + epoch, + resource: buffer.clone(), + }, &mut self.temp, ) }; strict_assert!(self.temp.len() <= 1); - Some((value, self.temp.pop())) + Some((&buffer, self.temp.pop())) } /// Sets the given state for all buffers in the given tracker. 
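`BufferTracker` is double sided: it remembers the state each buffer was first seen in (`start`) and the state it currently holds (`end`), and recording a new use may push a `PendingTransition` that later becomes a hal barrier. A rough sketch of that idea with a made-up `Uses` bitmask; the skip rule below is a simplification of the real `skip_barrier` check over `hal::BufferUses`.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Uses(u32);

impl Uses {
    const COPY_SRC: Uses = Uses(1 << 0);
    const COPY_DST: Uses = Uses(1 << 1);
    const STORAGE_WRITE: Uses = Uses(1 << 2);

    const WRITE_MASK: u32 = Self::COPY_DST.0 | Self::STORAGE_WRITE.0;

    fn is_write(self) -> bool {
        self.0 & Self::WRITE_MASK != 0
    }
}

#[derive(Debug)]
struct Transition {
    from: Uses,
    to: Uses,
}

struct TrackedBuffer {
    start: Uses,
    end: Uses,
}

impl TrackedBuffer {
    fn new(initial: Uses) -> Self {
        Self { start: initial, end: initial }
    }

    // Simplified rule: re-recording the same read-only state needs no barrier;
    // any other change records a pending transition.
    fn set_state(&mut self, new: Uses, pending: &mut Vec<Transition>) {
        let old = self.end;
        let skip = old == new && !old.is_write();
        if !skip {
            pending.push(Transition { from: old, to: new });
        }
        self.end = new;
    }
}

fn main() {
    let mut pending = Vec::new();
    let mut buffer = TrackedBuffer::new(Uses::COPY_DST);

    buffer.set_state(Uses::COPY_SRC, &mut pending); // write -> read: barrier
    buffer.set_state(Uses::COPY_SRC, &mut pending); // same read state: skipped
    buffer.set_state(Uses::STORAGE_WRITE, &mut pending); // read -> write: barrier

    assert_eq!(pending.len(), 2);
    assert_eq!(pending[0].from, Uses::COPY_DST);
    assert_eq!(pending[1].to, Uses::STORAGE_WRITE);
    println!("first seen as {:?}, currently {:?}", buffer.start, buffer.end);
    println!("pending barriers: {:?}", pending);
}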
@@ -400,7 +411,6 @@ impl BufferTracker { tracker.tracker_assert_in_bounds(index); unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -440,7 +450,6 @@ impl BufferTracker { scope.tracker_assert_in_bounds(index); unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -498,7 +507,6 @@ impl BufferTracker { } unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -541,7 +549,7 @@ impl BufferTracker { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - if existing_epoch == epoch && existing_ref_count.load() == 1 { + if existing_epoch == epoch && existing_ref_count == 1 { self.metadata.remove(index); return true; } @@ -588,22 +596,20 @@ impl BufferStateProvider<'_> { /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. #[inline(always)] -unsafe fn insert_or_merge( - life_guard: Option<&LifeGuard>, +unsafe fn insert_or_merge( start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, state_provider: BufferStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) -> Result<(), UsageConflict> { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { unsafe { insert( - life_guard, start_states, current_states, resource_metadata, @@ -645,16 +651,15 @@ unsafe fn insert_or_merge( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. 
#[inline(always)] -unsafe fn insert_or_barrier_update( - life_guard: Option<&LifeGuard>, +unsafe fn insert_or_barrier_update( start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, start_state_provider: BufferStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, barriers: &mut Vec>, ) { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; @@ -662,7 +667,6 @@ unsafe fn insert_or_barrier_update( if !currently_owned { unsafe { insert( - life_guard, start_states, current_states, resource_metadata, @@ -690,15 +694,14 @@ unsafe fn insert_or_barrier_update( } #[inline(always)] -unsafe fn insert( - life_guard: Option<&LifeGuard>, +unsafe fn insert( start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index: usize, start_state_provider: BufferStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) { let new_start_state = unsafe { start_state_provider.get_state(index) }; let new_end_state = @@ -717,18 +720,18 @@ unsafe fn insert( } *current_states.get_unchecked_mut(index) = new_end_state; - let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); - resource_metadata.insert(index, epoch, ref_count); + let (epoch, resource) = metadata_provider.get_own(index); + resource_metadata.insert(index, epoch, resource); } } #[inline(always)] -unsafe fn merge( +unsafe fn merge( current_states: &mut [BufferUses], index32: u32, index: usize, state_provider: BufferStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) -> Result<(), UsageConflict> { let current_state = unsafe { current_states.get_unchecked_mut(index) }; let new_state = unsafe { state_provider.get_state(index) }; diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 73da0d6c5d..56bbf5cd2f 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -1,12 +1,8 @@ //! The `ResourceMetadata` type. -use crate::{ - hub, - id::{self, TypedId}, - Epoch, LifeGuard, RefCount, -}; +use crate::{hal_api::HalApi, id::TypedId, resource::Resource, Epoch}; use bit_vec::BitVec; -use std::{borrow::Cow, marker::PhantomData, mem}; +use std::{borrow::Cow, marker::PhantomData, mem, sync::Arc}; use wgt::strict_assert; /// A set of resources, holding a [`RefCount`] and epoch for each member. @@ -17,25 +13,25 @@ use wgt::strict_assert; /// members, but a bit vector tracks occupancy, so iteration touches /// only occupied elements. #[derive(Debug)] -pub(super) struct ResourceMetadata { +pub(super) struct ResourceMetadata> { /// If the resource with index `i` is a member, `owned[i]` is `true`. owned: BitVec, /// A vector parallel to `owned`, holding clones of members' `RefCount`s. - ref_counts: Vec>, + resources: Vec>>, /// A vector parallel to `owned`, holding the epoch of each members' id. epochs: Vec, /// This tells Rust that this type should be covariant with `A`. 
- _phantom: PhantomData, + _phantom: PhantomData<(A, I)>, } -impl ResourceMetadata { +impl> ResourceMetadata { pub(super) fn new() -> Self { Self { owned: BitVec::default(), - ref_counts: Vec::new(), + resources: Vec::new(), epochs: Vec::new(), _phantom: PhantomData, @@ -48,7 +44,7 @@ impl ResourceMetadata { } pub(super) fn set_size(&mut self, size: usize) { - self.ref_counts.resize(size, None); + self.resources.resize(size, None); self.epochs.resize(size, u32::MAX); resize_bitvec(&mut self.owned, size); @@ -61,11 +57,11 @@ impl ResourceMetadata { #[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))] pub(super) fn tracker_assert_in_bounds(&self, index: usize) { strict_assert!(index < self.owned.len()); - strict_assert!(index < self.ref_counts.len()); + strict_assert!(index < self.resources.len()); strict_assert!(index < self.epochs.len()); strict_assert!(if self.contains(index) { - self.ref_counts[index].is_some() + self.resources[index].is_some() } else { true }); @@ -104,30 +100,48 @@ impl ResourceMetadata { /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, ref_count: RefCount) { + pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, resource: Arc) { self.owned.set(index, true); unsafe { *self.epochs.get_unchecked_mut(index) = epoch; - *self.ref_counts.get_unchecked_mut(index) = Some(ref_count); + *self.resources.get_unchecked_mut(index) = Some(resource); } } - /// Get the [`RefCount`] of the resource with the given index. + /// Get the resource with the given index. /// /// # Safety /// /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> &RefCount { + pub(super) unsafe fn get_resource_unchecked(&self, index: usize) -> &Arc { unsafe { - self.ref_counts + self.resources .get_unchecked(index) .as_ref() .unwrap_unchecked() } } + /// Get the [`RefCount`] of the resource with the given index. + /// + /// # Safety + /// + /// The given `index` must be in bounds for this `ResourceMetadata`'s + /// existing tables. See `tracker_assert_in_bounds`. + #[inline(always)] + pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> usize { + unsafe { + Arc::strong_count( + self.resources + .get_unchecked(index) + .as_ref() + .unwrap_unchecked(), + ) + } + } + /// Get the [`Epoch`] of the id of the resource with the given index. /// /// # Safety @@ -139,14 +153,14 @@ impl ResourceMetadata { unsafe { *self.epochs.get_unchecked(index) } } - /// Returns an iterator over the ids for all resources owned by `self`. - pub(super) fn owned_ids(&self) -> impl Iterator> + '_ { + /// Returns an iterator over the resources owned by `self`. + pub(super) fn owned_resources(&self) -> impl Iterator> + '_ { if !self.owned.is_empty() { self.tracker_assert_in_bounds(self.owned.len() - 1) }; iterate_bitvec_indices(&self.owned).map(move |index| { - let epoch = unsafe { *self.epochs.get_unchecked(index) }; - id::Valid(Id::zip(index as u32, epoch, A::VARIANT)) + let resource = unsafe { self.resources.get_unchecked(index) }; + resource.as_ref().unwrap() }) } @@ -161,7 +175,7 @@ impl ResourceMetadata { /// Remove the resource with the given index from the set. 
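`ResourceMetadata` is a set keyed by id index: a bit vector records membership while parallel vectors hold each member's epoch and an `Arc` to the resource itself. A simplified sketch of that layout using a plain `Vec<bool>` instead of `BitVec` and safe indexing instead of the unchecked accessors; the names are illustrative.

use std::sync::Arc;

type Epoch = u32;

// Simplified stand-in for ResourceMetadata: membership flags plus parallel
// vectors of epochs and Arc'd resources, all indexed by the id's index.
struct DemoMetadata<T> {
    owned: Vec<bool>,
    epochs: Vec<Epoch>,
    resources: Vec<Option<Arc<T>>>,
}

impl<T> DemoMetadata<T> {
    fn new() -> Self {
        Self { owned: Vec::new(), epochs: Vec::new(), resources: Vec::new() }
    }

    fn set_size(&mut self, size: usize) {
        self.owned.resize(size, false);
        self.epochs.resize(size, u32::MAX);
        self.resources.resize_with(size, || None);
    }

    fn insert(&mut self, index: usize, epoch: Epoch, resource: Arc<T>) {
        self.owned[index] = true;
        self.epochs[index] = epoch;
        self.resources[index] = Some(resource);
    }

    fn remove(&mut self, index: usize) {
        self.owned[index] = false;
        self.epochs[index] = u32::MAX;
        self.resources[index] = None;
    }

    // Membership plus a matching epoch is what makes an index trustworthy.
    fn contains(&self, index: usize, epoch: Epoch) -> bool {
        self.owned[index] && self.epochs[index] == epoch
    }

    // Iterate only over occupied slots, yielding the tracked resources
    // (the counterpart of owned_resources above).
    fn owned_resources(&self) -> impl Iterator<Item = &Arc<T>> + '_ {
        self.owned
            .iter()
            .enumerate()
            .filter(|&(_, owned)| *owned)
            .filter_map(move |(index, _)| self.resources[index].as_ref())
    }
}

fn main() {
    let mut metadata = DemoMetadata::new();
    metadata.set_size(4);
    metadata.insert(1, 7, Arc::new("texture A"));
    metadata.insert(3, 2, Arc::new("texture B"));
    metadata.remove(3);

    assert!(metadata.contains(1, 7));
    assert!(!metadata.contains(3, 2));

    let tracked: Vec<&str> = metadata.owned_resources().map(|r| **r).collect();
    assert_eq!(tracked, vec!["texture A"]);
    println!("tracked resources: {:?}", tracked);
}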
pub(super) unsafe fn remove(&mut self, index: usize) { unsafe { - *self.ref_counts.get_unchecked_mut(index) = None; + *self.resources.get_unchecked_mut(index) = None; *self.epochs.get_unchecked_mut(index) = u32::MAX; } self.owned.set(index, false); @@ -172,45 +186,40 @@ impl ResourceMetadata { /// /// This is used to abstract over the various places /// trackers can get new resource metadata from. -pub(super) enum ResourceMetadataProvider<'a, A: hub::HalApi> { +pub(super) enum ResourceMetadataProvider<'a, A: HalApi, I: TypedId, T: Resource> { /// Comes directly from explicit values. Direct { epoch: Epoch, - ref_count: Cow<'a, RefCount>, + resource: Cow<'a, Arc>, }, /// Comes from another metadata tracker. - Indirect { metadata: &'a ResourceMetadata }, + Indirect { + metadata: &'a ResourceMetadata, + }, /// The epoch is given directly, but the life count comes from the resource itself. - Resource { epoch: Epoch }, + Resource { epoch: Epoch, resource: Arc }, } -impl ResourceMetadataProvider<'_, A> { +impl> ResourceMetadataProvider<'_, A, I, T> { /// Get the epoch and an owned refcount from this. /// /// # Safety /// /// - The index must be in bounds of the metadata tracker if this uses an indirect source. - /// - life_guard must be Some if this uses a Resource source. + /// - info must be Some if this uses a Resource source. #[inline(always)] - pub(super) unsafe fn get_own( - self, - life_guard: Option<&LifeGuard>, - index: usize, - ) -> (Epoch, RefCount) { + pub(super) unsafe fn get_own(self, index: usize) -> (Epoch, Arc) { match self { - ResourceMetadataProvider::Direct { epoch, ref_count } => { - (epoch, ref_count.into_owned()) + ResourceMetadataProvider::Direct { epoch, resource } => { + (epoch, resource.into_owned().clone()) } ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); (unsafe { *metadata.epochs.get_unchecked(index) }, { - let ref_count = unsafe { metadata.ref_counts.get_unchecked(index) }; - unsafe { ref_count.clone().unwrap_unchecked() } + let resource = unsafe { metadata.resources.get_unchecked(index) }; + unsafe { resource.clone().unwrap_unchecked() } }) } - ResourceMetadataProvider::Resource { epoch } => { - strict_assert!(life_guard.is_some()); - (epoch, unsafe { life_guard.unwrap_unchecked() }.add_ref()) - } + ResourceMetadataProvider::Resource { epoch, resource } => (epoch, resource), } } /// Get the epoch from this. diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index b2548f08ae..272d71ccec 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -100,9 +100,11 @@ mod stateless; mod texture; use crate::{ - binding_model, command, conv, hub, + binding_model, command, conv, + hal_api::HalApi, id::{self, TypedId}, pipeline, resource, + storage::Storage, }; use std::{fmt, ops}; @@ -128,7 +130,7 @@ pub(crate) struct PendingTransition { impl PendingTransition { /// Produce the hal barrier corresponding to the transition. - pub fn into_hal<'a, A: hal::Api>( + pub fn into_hal<'a, A: HalApi>( self, buf: &'a resource::Buffer, ) -> hal::BufferBarrier<'a, A> { @@ -142,11 +144,16 @@ impl PendingTransition { impl PendingTransition { /// Produce the hal barrier corresponding to the transition. 
- pub fn into_hal<'a, A: hal::Api>( + pub fn into_hal<'a, A: HalApi>( self, tex: &'a resource::Texture, ) -> hal::TextureBarrier<'a, A> { - let texture = tex.inner.as_raw().expect("Texture is destroyed"); + let texture = tex + .inner + .as_ref() + .unwrap() + .as_raw() + .expect("Texture is destroyed"); // These showing up in a barrier is always a bug strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN); @@ -312,14 +319,15 @@ impl fmt::Display for InvalidUse { /// /// All bind group states are sorted by their ID so that when adding to a tracker, /// they are added in the most efficient order possible (assending order). -pub(crate) struct BindGroupStates { +#[derive(Debug)] +pub(crate) struct BindGroupStates { pub buffers: BufferBindGroupState, pub textures: TextureBindGroupState, - pub views: StatelessBindGroupSate, id::TextureViewId>, - pub samplers: StatelessBindGroupSate, id::SamplerId>, + pub views: StatelessBindGroupSate>, + pub samplers: StatelessBindGroupSate>, } -impl BindGroupStates { +impl BindGroupStates { pub fn new() -> Self { Self { buffers: BufferBindGroupState::new(), @@ -344,23 +352,24 @@ impl BindGroupStates { /// This is a render bundle specific usage scope. It includes stateless resources /// that are not normally included in a usage scope, but are used by render bundles /// and need to be owned by the render bundles. -pub(crate) struct RenderBundleScope { +#[derive(Debug)] +pub(crate) struct RenderBundleScope { pub buffers: BufferUsageScope, pub textures: TextureUsageScope, // Don't need to track views and samplers, they are never used directly, only by bind groups. - pub bind_groups: StatelessTracker, id::BindGroupId>, - pub render_pipelines: StatelessTracker, id::RenderPipelineId>, - pub query_sets: StatelessTracker, id::QuerySetId>, + pub bind_groups: StatelessTracker>, + pub render_pipelines: StatelessTracker>, + pub query_sets: StatelessTracker>, } -impl RenderBundleScope { +impl RenderBundleScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. pub fn new( - buffers: &hub::Storage, id::BufferId>, - textures: &hub::Storage, id::TextureId>, - bind_groups: &hub::Storage, id::BindGroupId>, - render_pipelines: &hub::Storage, id::RenderPipelineId>, - query_sets: &hub::Storage, id::QuerySetId>, + buffers: &Storage, id::BufferId>, + textures: &Storage, id::TextureId>, + bind_groups: &Storage, id::BindGroupId>, + render_pipelines: &Storage, id::RenderPipelineId>, + query_sets: &Storage, id::QuerySetId>, ) -> Self { let mut value = Self { buffers: BufferUsageScope::new(), @@ -390,7 +399,7 @@ impl RenderBundleScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; @@ -406,16 +415,16 @@ impl RenderBundleScope { /// A usage scope tracker. Only needs to store stateful resources as stateless /// resources cannot possibly have a usage conflict. #[derive(Debug)] -pub(crate) struct UsageScope { +pub(crate) struct UsageScope { pub buffers: BufferUsageScope, pub textures: TextureUsageScope, } -impl UsageScope { +impl UsageScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. 
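Scopes and trackers index their internal vectors by id index, which is why the constructors "pull the maximum IDs from the hubs": every per-resource vector is pre-sized to the length of the corresponding storage so later lookups stay in bounds. A small sketch of that sizing step; `DemoRegistry` and `DemoScope` are made-up stand-ins.

struct DemoRegistry<T> {
    map: Vec<Option<T>>,
}

impl<T> DemoRegistry<T> {
    fn len(&self) -> usize {
        self.map.len()
    }
}

struct DemoScope {
    buffer_states: Vec<u32>,
    texture_states: Vec<u32>,
}

impl DemoScope {
    fn new(buffers: &DemoRegistry<&'static str>, textures: &DemoRegistry<&'static str>) -> Self {
        let mut scope = Self {
            buffer_states: Vec::new(),
            texture_states: Vec::new(),
        };
        scope.set_size(buffers.len(), textures.len());
        scope
    }

    // Pre-size the per-resource vectors; a default "unused" state fills the gaps.
    fn set_size(&mut self, buffers: usize, textures: usize) {
        self.buffer_states.resize(buffers, 0);
        self.texture_states.resize(textures, 0);
    }
}

fn main() {
    let buffers = DemoRegistry { map: vec![None, Some("vertex buffer"), Some("index buffer")] };
    let textures = DemoRegistry { map: vec![Some("depth texture")] };

    let scope = DemoScope::new(&buffers, &textures);
    assert_eq!(scope.buffer_states.len(), 3);
    assert_eq!(scope.texture_states.len(), 1);
    println!(
        "scope sized for {} buffers and {} textures",
        scope.buffer_states.len(),
        scope.texture_states.len()
    );
}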
pub fn new( - buffers: &hub::Storage, id::BufferId>, - textures: &hub::Storage, id::TextureId>, + buffers: &Storage, id::BufferId>, + textures: &Storage, id::TextureId>, ) -> Self { let mut value = Self { buffers: BufferUsageScope::new(), @@ -439,7 +448,7 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { @@ -462,7 +471,7 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_render_bundle( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &Storage, id::TextureId>, render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { self.buffers.merge_usage_scope(&render_bundle.buffers)?; @@ -474,19 +483,19 @@ impl UsageScope { } /// A full double sided tracker used by CommandBuffers and the Device. -pub(crate) struct Tracker { +pub(crate) struct Tracker { pub buffers: BufferTracker, pub textures: TextureTracker, - pub views: StatelessTracker, id::TextureViewId>, - pub samplers: StatelessTracker, id::SamplerId>, - pub bind_groups: StatelessTracker, id::BindGroupId>, - pub compute_pipelines: StatelessTracker, id::ComputePipelineId>, - pub render_pipelines: StatelessTracker, id::RenderPipelineId>, - pub bundles: StatelessTracker, id::RenderBundleId>, - pub query_sets: StatelessTracker, id::QuerySetId>, + pub views: StatelessTracker>, + pub samplers: StatelessTracker>, + pub bind_groups: StatelessTracker>, + pub compute_pipelines: StatelessTracker>, + pub render_pipelines: StatelessTracker>, + pub bundles: StatelessTracker>, + pub query_sets: StatelessTracker>, } -impl Tracker { +impl Tracker { pub fn new() -> Self { Self { buffers: BufferTracker::new(), @@ -504,17 +513,15 @@ impl Tracker { /// Pull the maximum IDs from the hubs. 
pub fn set_size( &mut self, - buffers: Option<&hub::Storage, id::BufferId>>, - textures: Option<&hub::Storage, id::TextureId>>, - views: Option<&hub::Storage, id::TextureViewId>>, - samplers: Option<&hub::Storage, id::SamplerId>>, - bind_groups: Option<&hub::Storage, id::BindGroupId>>, - compute_pipelines: Option< - &hub::Storage, id::ComputePipelineId>, - >, - render_pipelines: Option<&hub::Storage, id::RenderPipelineId>>, - bundles: Option<&hub::Storage, id::RenderBundleId>>, - query_sets: Option<&hub::Storage, id::QuerySetId>>, + buffers: Option<&Storage, id::BufferId>>, + textures: Option<&Storage, id::TextureId>>, + views: Option<&Storage, id::TextureViewId>>, + samplers: Option<&Storage, id::SamplerId>>, + bind_groups: Option<&Storage, id::BindGroupId>>, + compute_pipelines: Option<&Storage, id::ComputePipelineId>>, + render_pipelines: Option<&Storage, id::RenderPipelineId>>, + bundles: Option<&Storage, id::RenderBundleId>>, + query_sets: Option<&Storage, id::QuerySetId>>, ) { if let Some(buffers) = buffers { self.buffers.set_size(buffers.len()); @@ -569,14 +576,14 @@ impl Tracker { /// value given to `set_size` pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - textures: &hub::Storage, id::TextureId>, + textures: &Storage, id::TextureId>, scope: &mut UsageScope, bind_group: &BindGroupStates, ) { unsafe { self.buffers.set_and_remove_from_usage_scope_sparse( &mut scope.buffers, - bind_group.buffers.used(), + bind_group.buffers.used_ids(), ) }; unsafe { diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 1d0fd5997a..989ff69d7e 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -4,28 +4,26 @@ * distinction between a usage scope and a full tracker. !*/ -use std::marker::PhantomData; +use std::{marker::PhantomData, sync::Arc}; use crate::{ - hub, + hal_api::HalApi, id::{TypedId, Valid}, + resource::Resource, + storage::Storage, track::ResourceMetadata, - RefCount, }; /// Stores all the resources that a bind group stores. -pub(crate) struct StatelessBindGroupSate { - resources: Vec<(Valid, RefCount)>, - - _phantom: PhantomData, +#[derive(Debug)] +pub(crate) struct StatelessBindGroupSate> { + resources: Vec<(Valid, Arc)>, } -impl StatelessBindGroupSate { +impl> StatelessBindGroupSate { pub fn new() -> Self { Self { resources: Vec::new(), - - _phantom: PhantomData, } } @@ -43,25 +41,30 @@ impl StatelessBindGroupSate { self.resources.iter().map(|&(id, _)| id) } + /// Returns a list of all resources tracked. May contain duplicates. + pub fn used_resources(&self) -> impl Iterator> + '_ { + self.resources.iter().map(|(_, resource)| resource.clone()) + } + /// Adds the given resource. - pub fn add_single<'a>(&mut self, storage: &'a hub::Storage, id: Id) -> Option<&'a T> { + pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { let resource = storage.get(id).ok()?; - self.resources - .push((Valid(id), resource.life_guard().add_ref())); + self.resources.push((Valid(id), resource.clone())); - Some(resource) + Some(&resource) } } /// Stores all resource state within a command buffer or device. 
-pub(crate) struct StatelessTracker { - metadata: ResourceMetadata, +#[derive(Debug)] +pub(crate) struct StatelessTracker> { + metadata: ResourceMetadata, - _phantom: PhantomData<(T, Id)>, + _phantom: PhantomData, } -impl StatelessTracker { +impl> StatelessTracker { pub fn new() -> Self { Self { metadata: ResourceMetadata::new(), @@ -90,8 +93,8 @@ impl StatelessTracker { } /// Returns a list of all resources tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator> + '_ { + self.metadata.owned_resources() } /// Inserts a single resource into the resource tracker. @@ -100,7 +103,7 @@ impl StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: Valid, ref_count: RefCount) { + pub fn insert_single(&mut self, id: Valid, resource: Arc) { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; @@ -109,7 +112,7 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata.insert(index, epoch, ref_count); + self.metadata.insert(index, epoch, resource); } } @@ -117,8 +120,8 @@ impl StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn add_single<'a>(&mut self, storage: &'a hub::Storage, id: Id) -> Option<&'a T> { - let item = storage.get(id).ok()?; + pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { + let resource = storage.get(id).ok()?; let (index32, epoch, _) = id.unzip(); let index = index32 as usize; @@ -128,11 +131,10 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata - .insert(index, epoch, item.life_guard().add_ref()); + self.metadata.insert(index, epoch, resource.clone()); } - Some(item) + Some(&resource) } /// Adds the given resources from the given tracker. 
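A stateless tracker has no usage state to merge; it only holds an `Arc` clone of each resource to keep it alive, and a resource counts as abandoned once the tracker's clone is the last strong reference left. A simplified sketch of that idea; the names are illustrative, and the real tracker is keyed by id index and also checks the id's epoch before removing an entry.

use std::sync::Arc;

// Illustrative stand-in; holds clones purely to keep resources alive.
struct DemoStatelessTracker<T> {
    resources: Vec<Arc<T>>,
}

impl<T> DemoStatelessTracker<T> {
    fn new() -> Self {
        Self { resources: Vec::new() }
    }

    fn add(&mut self, resource: &Arc<T>) {
        self.resources.push(Arc::clone(resource));
    }

    // Drop every resource that nothing outside the tracker still references,
    // returning how many entries were released.
    fn remove_abandoned(&mut self) -> usize {
        let before = self.resources.len();
        self.resources.retain(|r| Arc::strong_count(r) > 1);
        before - self.resources.len()
    }
}

fn main() {
    let bind_group = Arc::new("bind group");
    let pipeline = Arc::new("render pipeline");

    let mut tracker = DemoStatelessTracker::new();
    tracker.add(&bind_group);
    tracker.add(&pipeline);

    // The user drops their handle to the pipeline; only the tracker's clone
    // remains, so the next sweep releases it.
    drop(pipeline);
    assert_eq!(tracker.remove_abandoned(), 1);
    assert_eq!(tracker.resources.len(), 1);
    assert_eq!(Arc::strong_count(&bind_group), 2);
}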
@@ -153,8 +155,8 @@ impl StatelessTracker { if !previously_owned { let epoch = other.metadata.get_epoch_unchecked(index); - let other_ref_count = other.metadata.get_ref_count_unchecked(index); - self.metadata.insert(index, epoch, other_ref_count.clone()); + let other_resource = other.metadata.get_resource_unchecked(index); + self.metadata.insert(index, epoch, other_resource.clone()); } } } @@ -182,7 +184,7 @@ impl StatelessTracker { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - if existing_epoch == epoch && existing_ref_count.load() == 1 { + if existing_epoch == epoch && existing_ref_count == 1 { self.metadata.remove(index); return true; } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 6db2bab725..8f7355d9f2 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -21,22 +21,23 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ - hub, + hal_api::HalApi, id::{TextureId, TypedId, Valid}, resource::Texture, + storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, - LifeGuard, RefCount, }; use hal::TextureUses; use arrayvec::ArrayVec; use naga::FastHashMap; + use wgt::{strict_assert, strict_assert_eq}; -use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, vec::Drain}; +use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain}; /// Specifies a particular set of subresources in a texture. #[derive(Clone, Debug, PartialEq, Eq)] @@ -148,17 +149,18 @@ impl ComplexTextureState { } /// Stores all the textures that a bind group stores. -pub(crate) struct TextureBindGroupState { +#[derive(Debug)] +pub(crate) struct TextureBindGroupState { textures: Vec<( Valid, Option, - RefCount, + Arc>, TextureUses, )>, _phantom: PhantomData, } -impl TextureBindGroupState { +impl TextureBindGroupState { pub fn new() -> Self { Self { textures: Vec::new(), @@ -176,25 +178,25 @@ impl TextureBindGroupState { .sort_unstable_by_key(|&(id, _, _, _)| id.0.unzip().0); } - /// Returns a list of all buffers tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { - self.textures.iter().map(|&(id, _, _, _)| id) + /// Returns a list of all textures tracked. May contain duplicates. + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.textures.iter().map(|(_, _, texture, _)| texture) } /// Adds the given resource with the given state. pub fn add_single<'a>( &mut self, - storage: &'a hub::Storage, TextureId>, + storage: &'a Storage, TextureId>, id: TextureId, - ref_count: RefCount, selector: Option, state: TextureUses, ) -> Option<&'a Texture> { - let value = storage.get(id).ok()?; + let resource = storage.get(id).ok()?; - self.textures.push((Valid(id), selector, ref_count, state)); + self.textures + .push((Valid(id), selector, resource.clone(), state)); - Some(value) + Some(&resource) } } @@ -219,13 +221,13 @@ impl TextureStateSet { /// Stores all texture state within a single usage scope. #[derive(Debug)] -pub(crate) struct TextureUsageScope { +pub(crate) struct TextureUsageScope { set: TextureStateSet, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, } -impl TextureUsageScope { +impl TextureUsageScope { pub fn new() -> Self { Self { set: TextureStateSet::new(), @@ -258,8 +260,8 @@ impl TextureUsageScope { } /// Returns a list of all textures tracked. 
- pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub(crate) fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } /// Returns true if the tracker owns no resources. @@ -278,7 +280,7 @@ impl TextureUsageScope { /// the vectors will be extended. A call to set_size is not needed. pub fn merge_usage_scope( &mut self, - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, scope: &Self, ) -> Result<(), UsageConflict> { let incoming_size = scope.set.simple.len(); @@ -292,10 +294,10 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; unsafe { insert_or_merge( - texture_data, + texture_selector, &mut self.set, &mut self.metadata, index32, @@ -325,11 +327,11 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_bind_group( &mut self, - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref selector, ref ref_count, state) in &bind_group.textures { - unsafe { self.merge_single(storage, id, selector.clone(), ref_count, state)? }; + for &(id, ref selector, ref _texture, state) in &bind_group.textures { + unsafe { self.merge_single(storage, id, selector.clone(), state)? }; } Ok(()) @@ -350,21 +352,21 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_single( &mut self, - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, id: Valid, selector: Option, - ref_count: &RefCount, new_state: TextureUses, ) -> Result<(), UsageConflict> { let (index32, epoch, _) = id.0.unzip(); let index = index32 as usize; + let resource = storage.get(id.0).unwrap(); self.tracker_assert_in_bounds(index); - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; unsafe { insert_or_merge( - texture_data, + texture_selector, &mut self.set, &mut self.metadata, index32, @@ -372,7 +374,7 @@ impl TextureUsageScope { TextureStateProvider::from_option(selector, new_state), ResourceMetadataProvider::Direct { epoch, - ref_count: Cow::Borrowed(ref_count), + resource: Cow::Borrowed(resource), }, )? }; @@ -382,17 +384,17 @@ impl TextureUsageScope { } /// Stores all texture state within a command buffer or device. -pub(crate) struct TextureTracker { +pub(crate) struct TextureTracker { start_set: TextureStateSet, end_set: TextureStateSet, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, temp: Vec>, _phantom: PhantomData, } -impl TextureTracker { +impl TextureTracker { pub fn new() -> Self { Self { start_set: TextureStateSet::new(), @@ -447,8 +449,8 @@ impl TextureTracker { } /// Returns a list of all textures tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } /// Drains all currently pending transitions. @@ -456,30 +458,13 @@ impl TextureTracker { self.temp.drain(..) } - /// Get the refcount of the given resource. - /// - /// # Safety - /// - /// [`Self::set_size`] must be called with the maximum possible Buffer ID before this - /// method is called. - /// - /// The resource must be tracked by this tracker. 
- pub unsafe fn get_ref_count(&self, id: Valid) -> &RefCount { - let (index32, _, _) = id.0.unzip(); - let index = index32 as usize; - - self.tracker_assert_in_bounds(index); - - unsafe { self.metadata.get_ref_count_unchecked(index) } - } - /// Inserts a single texture and a state into the resource tracker. /// /// If the resource already exists in the tracker, this will panic. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: TextureId, ref_count: RefCount, usage: TextureUses) { + pub fn insert_single(&mut self, id: TextureId, resource: Arc>, usage: TextureUses) { let (index32, epoch, _) = id.unzip(); let index = index32 as usize; @@ -505,7 +490,7 @@ impl TextureTracker { None, ResourceMetadataProvider::Direct { epoch, - ref_count: Cow::Owned(ref_count), + resource: Cow::Owned(resource.clone()), }, ) }; @@ -520,7 +505,7 @@ impl TextureTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_single( &mut self, - texture: &Texture, + texture: &Arc>, id: TextureId, selector: TextureSelector, new_state: TextureUses, @@ -534,7 +519,7 @@ impl TextureTracker { unsafe { insert_or_barrier_update( - (&texture.life_guard, &texture.full_range), + &texture.full_range, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, @@ -545,7 +530,10 @@ impl TextureTracker { state: new_state, }, None, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Resource { + epoch, + resource: texture.clone(), + }, &mut self.temp, ) } @@ -561,11 +549,7 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn set_from_tracker( - &mut self, - storage: &hub::Storage, TextureId>, - tracker: &Self, - ) { + pub fn set_from_tracker(&mut self, storage: &Storage, TextureId>, tracker: &Self) { let incoming_size = tracker.start_set.simple.len(); if incoming_size > self.start_set.simple.len() { self.set_size(incoming_size); @@ -577,8 +561,9 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); tracker.tracker_assert_in_bounds(index); unsafe { + let texture_selector = texture_selector_from_texture(storage, index32); insert_or_barrier_update( - texture_data_from_texture(storage, index32), + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, @@ -609,7 +594,7 @@ impl TextureTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_from_usage_scope( &mut self, - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, scope: &TextureUsageScope, ) { let incoming_size = scope.set.simple.len(); @@ -623,8 +608,9 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); unsafe { + let texture_selector = texture_selector_from_texture(storage, index32); insert_or_barrier_update( - texture_data_from_texture(storage, index32), + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, @@ -661,7 +647,7 @@ impl TextureTracker { /// method is called. 
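Texture tracking distinguishes a "simple" state, where every mip and layer shares one usage, from a "complex" state with a per-subresource table; the first diverging use promotes the texture from the former to the latter. A rough sketch of that promotion, with a plain `HashMap` standing in for `ComplexTextureState` and made-up usage names.

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Use {
    CopySrc,
    ColorTarget,
}

#[derive(Debug)]
enum TextureState {
    Simple(Use),
    // Keyed by (mip level, array layer).
    Complex(HashMap<(u32, u32), Use>),
}

fn set_subresource(
    state: &mut TextureState,
    mip: u32,
    layer: u32,
    new_use: Use,
    mips: u32,
    layers: u32,
) {
    let promoted = match state {
        // Same usage everywhere: the single value still covers the texture.
        TextureState::Simple(current) if *current == new_use => None,
        // One subresource diverges: expand to a per-subresource table.
        TextureState::Simple(current) => {
            let old = *current;
            let mut table = HashMap::new();
            for m in 0..mips {
                for l in 0..layers {
                    table.insert((m, l), old);
                }
            }
            table.insert((mip, layer), new_use);
            Some(TextureState::Complex(table))
        }
        // Already complex: just update the one subresource.
        TextureState::Complex(table) => {
            table.insert((mip, layer), new_use);
            None
        }
    };
    if let Some(new_state) = promoted {
        *state = new_state;
    }
}

fn main() {
    let (mips, layers) = (2, 2);
    let mut state = TextureState::Simple(Use::CopySrc);

    // Rendering to mip 0 / layer 1 forces per-subresource tracking.
    set_subresource(&mut state, 0, 1, Use::ColorTarget, mips, layers);

    match &state {
        TextureState::Complex(table) => {
            assert_eq!(table[&(0, 1)], Use::ColorTarget);
            assert_eq!(table[&(1, 0)], Use::CopySrc);
        }
        TextureState::Simple(_) => panic!("expected complex state"),
    }
}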
pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - storage: &hub::Storage, TextureId>, + storage: &Storage, TextureId>, scope: &mut TextureUsageScope, bind_group_state: &TextureBindGroupState, ) { @@ -678,10 +664,10 @@ impl TextureTracker { if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; unsafe { insert_or_barrier_update( - texture_data, + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, @@ -755,7 +741,7 @@ impl TextureTracker { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - if existing_epoch == epoch && existing_ref_count.load() == 1 { + if existing_epoch == epoch && existing_ref_count == 1 { self.start_set.complex.remove(&index32); self.end_set.complex.remove(&index32); @@ -827,7 +813,7 @@ impl<'a> TextureStateProvider<'a> { /// /// # Panics /// - /// Panics if texture_data is None and this uses a Selector source. + /// Panics if texture_selector is None and this uses a Selector source. /// /// # Safety /// @@ -835,7 +821,7 @@ impl<'a> TextureStateProvider<'a> { #[inline(always)] unsafe fn get_state( self, - texture_data: Option<(&LifeGuard, &TextureSelector)>, + texture_selector: Option<&TextureSelector>, index32: u32, index: usize, ) -> SingleOrManyStates< @@ -849,7 +835,7 @@ impl<'a> TextureStateProvider<'a> { // and if it is we promote to a simple state. This allows upstream // code to specify selectors willy nilly, and all that are really // single states are promoted here. - if *texture_data.unwrap().1 == selector { + if *texture_selector.unwrap() == selector { SingleOrManyStates::Single(state) } else { SingleOrManyStates::Many(EitherIter::Left(iter::once((selector, state)))) @@ -875,12 +861,12 @@ impl<'a> TextureStateProvider<'a> { /// Helper function that gets what is needed from the texture storage /// out of the texture storage. #[inline(always)] -unsafe fn texture_data_from_texture( - storage: &hub::Storage, TextureId>, +unsafe fn texture_selector_from_texture( + storage: &Storage, TextureId>, index32: u32, -) -> (&LifeGuard, &TextureSelector) { +) -> &TextureSelector { let texture = unsafe { storage.get_unchecked(index32) }; - (&texture.life_guard, &texture.full_range) + &texture.full_range } /// Does an insertion operation if the index isn't tracked @@ -893,21 +879,21 @@ unsafe fn texture_data_from_texture( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. 
#[inline(always)] -unsafe fn insert_or_merge( - texture_data: (&LifeGuard, &TextureSelector), +unsafe fn insert_or_merge( + texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, state_provider: TextureStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) -> Result<(), UsageConflict> { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { unsafe { insert( - Some(texture_data), + Some(texture_selector), None, current_state_set, resource_metadata, @@ -923,7 +909,7 @@ unsafe fn insert_or_merge( unsafe { merge( - texture_data, + texture_selector, current_state_set, index32, index, @@ -951,16 +937,16 @@ unsafe fn insert_or_merge( /// Indexes must be valid indexes into all arrays passed in /// to this function, either directly or via metadata or provider structs. #[inline(always)] -unsafe fn insert_or_barrier_update( - texture_data: (&LifeGuard, &TextureSelector), +unsafe fn insert_or_barrier_update( + texture_selector: &TextureSelector, start_state: Option<&mut TextureStateSet>, current_state_set: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, barriers: &mut Vec>, ) { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; @@ -968,7 +954,7 @@ unsafe fn insert_or_barrier_update( if !currently_owned { unsafe { insert( - Some(texture_data), + Some(texture_selector), start_state, current_state_set, resource_metadata, @@ -985,7 +971,7 @@ unsafe fn insert_or_barrier_update( let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); unsafe { barrier( - texture_data, + texture_selector, current_state_set, index32, index, @@ -997,7 +983,7 @@ unsafe fn insert_or_barrier_update( let start_state_set = start_state.unwrap(); unsafe { update( - texture_data, + texture_selector, start_state_set, current_state_set, index32, @@ -1008,18 +994,18 @@ unsafe fn insert_or_barrier_update( } #[inline(always)] -unsafe fn insert( - texture_data: Option<(&LifeGuard, &TextureSelector)>, +unsafe fn insert( + texture_selector: Option<&TextureSelector>, start_state: Option<&mut TextureStateSet>, end_state: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) { - let start_layers = unsafe { start_state_provider.get_state(texture_data, index32, index) }; + let start_layers = unsafe { start_state_provider.get_state(texture_selector, index32, index) }; match start_layers { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double @@ -1038,7 +1024,7 @@ unsafe fn insert( } } SingleOrManyStates::Many(state_iter) => { - let full_range = texture_data.unwrap().1.clone(); + let full_range = texture_selector.unwrap().clone(); let complex = unsafe { 
ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; @@ -1059,7 +1045,7 @@ unsafe fn insert( } if let Some(end_state_provider) = end_state_provider { - match unsafe { end_state_provider.get_state(texture_data, index32, index) } { + match unsafe { end_state_provider.get_state(texture_selector, index32, index) } { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. @@ -1072,7 +1058,7 @@ unsafe fn insert( unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } SingleOrManyStates::Many(state_iter) => { - let full_range = texture_data.unwrap().1.clone(); + let full_range = texture_selector.unwrap().clone(); let complex = unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) @@ -1089,20 +1075,19 @@ unsafe fn insert( } unsafe { - let (epoch, ref_count) = - metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index); - resource_metadata.insert(index, epoch, ref_count); + let (epoch, resource) = metadata_provider.get_own(index); + resource_metadata.insert(index, epoch, resource); } } #[inline(always)] -unsafe fn merge( - texture_data: (&LifeGuard, &TextureSelector), +unsafe fn merge( + texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, index32: u32, index: usize, state_provider: TextureStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) -> Result<(), UsageConflict> { let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { @@ -1116,7 +1101,7 @@ unsafe fn merge( SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1131,7 +1116,7 @@ unsafe fn merge( unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), - texture_data.1.clone(), + texture_selector.clone(), *current_simple, new_simple, )); @@ -1145,8 +1130,8 @@ unsafe fn merge( // as there wasn't one before. 
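
Several helpers above distinguish a single state covering the whole texture from per-subresource states: `get_state` promotes a selector equal to the texture's full range back to a simple state, and only partial selectors produce the complex representation. A small sketch of that decision with simplified stand-in types:

use std::ops::Range;

// Hypothetical, simplified stand-ins for TextureSelector / TextureUses.
#[derive(Clone, PartialEq, Debug)]
struct Selector { mips: Range<u32>, layers: Range<u32> }

#[derive(Clone, Copy, Debug, PartialEq)]
enum Use { Sampled, CopyDst }

#[derive(Debug, PartialEq)]
enum SingleOrMany {
    Single(Use),
    Many(Vec<(Selector, Use)>),
}

fn get_state(selector: Selector, state: Use, full_range: &Selector) -> SingleOrMany {
    if selector == *full_range {
        // The selector spans the whole texture, so one simple state is enough.
        SingleOrMany::Single(state)
    } else {
        // Partial selector: keep the (selector, state) pair as a complex state.
        SingleOrMany::Many(vec![(selector, state)])
    }
}

fn main() {
    let full = Selector { mips: 0..4, layers: 0..1 };
    assert_eq!(
        get_state(full.clone(), Use::Sampled, &full),
        SingleOrMany::Single(Use::Sampled)
    );
    let partial = Selector { mips: 0..1, layers: 0..1 };
    assert!(matches!(
        get_state(partial, Use::CopyDst, &full),
        SingleOrMany::Many(_)
    ));
}
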
let mut new_complex = unsafe { ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), + texture_selector.clone(), + iter::once((texture_selector.clone(), *current_simple)), ) }; @@ -1275,7 +1260,7 @@ unsafe fn merge( #[inline(always)] unsafe fn barrier( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, current_state_set: &TextureStateSet, index32: u32, index: usize, @@ -1291,7 +1276,7 @@ unsafe fn barrier( SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1303,7 +1288,7 @@ unsafe fn barrier( barriers.push(PendingTransition { id: index32, - selector: texture_data.1.clone(), + selector: texture_selector.clone(), usage: current_simple..new_simple, }); } @@ -1398,7 +1383,7 @@ unsafe fn barrier( #[allow(clippy::needless_option_as_deref)] // we use this for reborrowing Option<&mut T> #[inline(always)] unsafe fn update( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, start_state_set: &mut TextureStateSet, current_state_set: &mut TextureStateSet, index32: u32, @@ -1428,7 +1413,7 @@ unsafe fn update( SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1440,8 +1425,8 @@ unsafe fn update( // as there wasn't one before. let mut new_complex = unsafe { ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), + texture_selector.clone(), + iter::once((texture_selector.clone(), *current_simple)), ) }; diff --git a/wgpu-hal/LICENSE.APACHE b/wgpu-hal/LICENSE.APACHE deleted file mode 120000 index 7141cad5b2..0000000000 --- a/wgpu-hal/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-hal/LICENSE.APACHE b/wgpu-hal/LICENSE.APACHE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/wgpu-hal/LICENSE.APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/wgpu-hal/LICENSE.MIT b/wgpu-hal/LICENSE.MIT deleted file mode 120000 index 6b8772d1a7..0000000000 --- a/wgpu-hal/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-hal/LICENSE.MIT b/wgpu-hal/LICENSE.MIT new file mode 100644 index 0000000000..4699691b8e --- /dev/null +++ b/wgpu-hal/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 The gfx-rs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
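
The wgpu-hal diffs that follow change `Surface` methods from `&mut self` to `&self`, so the DX12 surface in the hunks below moves its swapchain behind an `RwLock`. A minimal sketch of the pattern, using `std::sync::RwLock` and hypothetical types instead of the real backend ones (the patch itself uses parking_lot locks, which behave the same minus the `unwrap`):

use std::sync::RwLock;

// Hypothetical stand-ins; the real backend holds actual swapchain handles.
struct SwapChain {
    extent: (u32, u32),
}

struct Surface {
    swap_chain: RwLock<Option<SwapChain>>,
}

impl Surface {
    fn new() -> Self {
        Self { swap_chain: RwLock::new(None) }
    }

    // `&self` is enough: the lock provides the mutability internally.
    fn configure(&self, width: u32, height: u32) {
        let mut guard = self.swap_chain.write().unwrap();
        // Take and drop any previous swapchain before installing the new one.
        let _old = guard.take();
        *guard = Some(SwapChain { extent: (width, height) });
    }

    fn unconfigure(&self) {
        let _old = self.swap_chain.write().unwrap().take();
    }

    fn extent(&self) -> Option<(u32, u32)> {
        self.swap_chain.read().unwrap().as_ref().map(|sc| sc.extent)
    }
}

fn main() {
    let surface = Surface::new();
    surface.configure(800, 600);
    assert_eq!(surface.extent(), Some((800, 600)));
    surface.unconfigure();
    assert_eq!(surface.extent(), None);
}
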
diff --git a/wgpu-hal/src/dx11/device.rs b/wgpu-hal/src/dx11/device.rs index 3b087c4311..ce33584e35 100644 --- a/wgpu-hal/src/dx11/device.rs +++ b/wgpu-hal/src/dx11/device.rs @@ -204,7 +204,7 @@ impl crate::Device for super::Device { impl crate::Queue for super::Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&super::CommandBuffer], signal_fence: Option<(&mut super::Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -212,8 +212,8 @@ impl crate::Queue for super::Queue { } unsafe fn present( - &mut self, - surface: &mut super::Surface, + &self, + surface: &super::Surface, texture: super::SurfaceTexture, ) -> Result<(), crate::SurfaceError> { todo!() diff --git a/wgpu-hal/src/dx11/mod.rs b/wgpu-hal/src/dx11/mod.rs index 91827874b1..4605359d26 100644 --- a/wgpu-hal/src/dx11/mod.rs +++ b/wgpu-hal/src/dx11/mod.rs @@ -108,30 +108,32 @@ pub struct BindGroup {} pub struct PipelineLayout {} #[derive(Debug)] pub struct ShaderModule {} +#[derive(Debug)] pub struct RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline {} impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { todo!() } - unsafe fn unconfigure(&mut self, device: &Device) { + unsafe fn unconfigure(&self, device: &Device) { todo!() } unsafe fn acquire_texture( - &mut self, + &self, _timeout: Option, ) -> Result>, crate::SurfaceError> { todo!() } - unsafe fn discard_texture(&mut self, texture: SurfaceTexture) { + unsafe fn discard_texture(&self, texture: SurfaceTexture) { todo!() } } diff --git a/wgpu-hal/src/dx12/adapter.rs b/wgpu-hal/src/dx12/adapter.rs index a77484c301..bcf2658594 100644 --- a/wgpu-hal/src/dx12/adapter.rs +++ b/wgpu-hal/src/dx12/adapter.rs @@ -2,6 +2,7 @@ use crate::{ auxil::{self, dxgi::result::HResult as _}, dx12::SurfaceTarget, }; +use parking_lot::Mutex; use std::{mem, ptr, sync::Arc, thread}; use winapi::{ shared::{dxgi, dxgi1_2, minwindef::DWORD, windef, winerror}, @@ -372,7 +373,7 @@ impl crate::Adapter for super::Adapter { device, queue: super::Queue { raw: queue, - temp_lists: Vec::new(), + temp_lists: Mutex::new(Vec::new()), }, }) } diff --git a/wgpu-hal/src/dx12/instance.rs b/wgpu-hal/src/dx12/instance.rs index be7a3f7306..b6767c86bb 100644 --- a/wgpu-hal/src/dx12/instance.rs +++ b/wgpu-hal/src/dx12/instance.rs @@ -1,3 +1,4 @@ +use parking_lot::RwLock; use winapi::shared::{dxgi1_5, minwindef}; use super::SurfaceTarget; @@ -95,7 +96,7 @@ impl crate::Instance for super::Instance { factory_media: self.factory_media, target: SurfaceTarget::WndHandle(handle.hwnd as *mut _), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), }), _ => Err(crate::InstanceError), } diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index 390a5693aa..7004682262 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -47,7 +47,7 @@ mod view; use crate::auxil::{self, dxgi::result::HResult as _}; use arrayvec::ArrayVec; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; use std::{ffi, fmt, mem, num::NonZeroU32, sync::Arc}; use winapi::{ shared::{dxgi, dxgi1_4, dxgitype, windef, winerror}, @@ -108,7 +108,7 @@ impl Instance { factory_media: self.factory_media, target: SurfaceTarget::Visual(unsafe { d3d12::WeakPtr::from_raw(visual) }), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } @@ -121,7 +121,7 @@ impl Instance { 
factory_media: self.factory_media, target: SurfaceTarget::SurfaceHandle(surface_handle), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } } @@ -152,7 +152,7 @@ pub struct Surface { factory_media: Option, target: SurfaceTarget, supports_allow_tearing: bool, - swap_chain: Option, + swap_chain: RwLock>, } unsafe impl Send for Surface {} @@ -271,7 +271,7 @@ unsafe impl Sync for Device {} pub struct Queue { raw: d3d12::CommandQueue, - temp_lists: Vec, + temp_lists: Mutex>, } unsafe impl Send for Queue {} @@ -496,6 +496,7 @@ pub struct Fence { unsafe impl Send for Fence {} unsafe impl Sync for Fence {} +#[derive(Debug)] pub struct BindGroupLayout { /// Sorted list of entries. entries: Vec, @@ -504,7 +505,7 @@ pub struct BindGroupLayout { copy_counts: Vec, // all 1's } -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] enum BufferViewKind { Constant, ShaderResource, @@ -528,19 +529,20 @@ bitflags::bitflags! { // Element (also known as parameter) index into the root signature. type RootIndex = u32; +#[derive(Debug)] struct BindGroupInfo { base_root_index: RootIndex, tables: TableTypes, dynamic_buffers: Vec, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct RootConstantInfo { root_index: RootIndex, range: std::ops::Range, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct PipelineLayoutShared { signature: d3d12::RootSignature, total_root_elements: RootIndex, @@ -551,6 +553,7 @@ struct PipelineLayoutShared { unsafe impl Send for PipelineLayoutShared {} unsafe impl Sync for PipelineLayoutShared {} +#[derive(Debug)] pub struct PipelineLayout { shared: PipelineLayoutShared, // Storing for each associated bind group, which tables we created @@ -589,6 +592,7 @@ impl CompiledShader { } } +#[derive(Debug)] pub struct RenderPipeline { raw: d3d12::PipelineState, layout: PipelineLayoutShared, @@ -599,6 +603,7 @@ pub struct RenderPipeline { unsafe impl Send for RenderPipeline {} unsafe impl Sync for RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline { raw: d3d12::PipelineState, layout: PipelineLayoutShared, @@ -637,7 +642,7 @@ impl SwapChain { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -651,7 +656,7 @@ impl crate::Surface for Surface { let non_srgb_format = auxil::dxgi::conv::map_texture_format_nosrgb(config.format); - let swap_chain = match self.swap_chain.take() { + let swap_chain = match self.swap_chain.write().take() { //Note: this path doesn't properly re-initialize all of the things Some(sc) => { // can't have image resources in flight used by GPU @@ -790,7 +795,8 @@ impl crate::Surface for Surface { }; } - self.swap_chain = Some(SwapChain { + let mut swapchain = self.swap_chain.write(); + *swapchain = Some(SwapChain { raw: swap_chain, resources, waitable, @@ -803,8 +809,8 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: &Device) { - if let Some(mut sc) = self.swap_chain.take() { + unsafe fn unconfigure(&self, device: &Device) { + if let Some(mut sc) = self.swap_chain.write().take() { unsafe { let _ = sc.wait(None); //TODO: this shouldn't be needed, @@ -817,10 +823,11 @@ impl crate::Surface for Surface { } unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, crate::SurfaceError> { - let sc = self.swap_chain.as_mut().unwrap(); + let mut swapchain = self.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); unsafe { 
sc.wait(timeout) }?; @@ -842,26 +849,28 @@ impl crate::Surface for Surface { suboptimal: false, })) } - unsafe fn discard_texture(&mut self, _texture: Texture) { - let sc = self.swap_chain.as_mut().unwrap(); + unsafe fn discard_texture(&self, _texture: Texture) { + let mut swapchain = self.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); sc.acquired_count -= 1; } } impl crate::Queue for Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&CommandBuffer], signal_fence: Option<(&mut Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { - self.temp_lists.clear(); + let mut temp_lists = self.temp_lists.lock(); + temp_lists.clear(); for cmd_buf in command_buffers { - self.temp_lists.push(cmd_buf.raw.as_list()); + temp_lists.push(cmd_buf.raw.as_list()); } { profiling::scope!("ID3D12CommandQueue::ExecuteCommandLists"); - self.raw.execute_command_lists(&self.temp_lists); + self.raw.execute_command_lists(&temp_lists); } if let Some((fence, value)) = signal_fence { @@ -872,11 +881,12 @@ impl crate::Queue for Queue { Ok(()) } unsafe fn present( - &mut self, - surface: &mut Surface, + &self, + surface: &Surface, _texture: Texture, ) -> Result<(), crate::SurfaceError> { - let sc = surface.swap_chain.as_mut().unwrap(); + let mut swapchain = surface.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); sc.acquired_count -= 1; let (interval, flags) = match sc.present_mode { diff --git a/wgpu-hal/src/empty.rs b/wgpu-hal/src/empty.rs index 1497acad91..7b58e5a9d4 100644 --- a/wgpu-hal/src/empty.rs +++ b/wgpu-hal/src/empty.rs @@ -57,22 +57,22 @@ impl crate::Instance for Context { impl crate::Surface for Context { unsafe fn configure( - &mut self, + &self, device: &Context, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { Ok(()) } - unsafe fn unconfigure(&mut self, device: &Context) {} + unsafe fn unconfigure(&self, device: &Context) {} unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, crate::SurfaceError> { Ok(None) } - unsafe fn discard_texture(&mut self, texture: Resource) {} + unsafe fn discard_texture(&self, texture: Resource) {} } impl crate::Adapter for Context { @@ -101,15 +101,15 @@ impl crate::Adapter for Context { impl crate::Queue for Context { unsafe fn submit( - &mut self, + &self, command_buffers: &[&Resource], signal_fence: Option<(&mut Resource, crate::FenceValue)>, ) -> DeviceResult<()> { Ok(()) } unsafe fn present( - &mut self, - surface: &mut Context, + &self, + surface: &Context, texture: Resource, ) -> Result<(), crate::SurfaceError> { Ok(()) diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index 46bba478f3..210ac28886 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -1,5 +1,6 @@ use glow::HasContext; -use std::sync::Arc; +use parking_lot::Mutex; +use std::sync::{atomic::AtomicU8, Arc}; use wgt::AstcChannel; use crate::auxil::db; @@ -685,9 +686,9 @@ impl crate::Adapter for super::Adapter { shader_clear_program, shader_clear_program_color_uniform_location, zero_buffer, - temp_query_results: Vec::new(), - draw_buffer_count: 1, - current_index_buffer: None, + temp_query_results: Mutex::new(Vec::new()), + draw_buffer_count: AtomicU8::new(1), + current_index_buffer: Mutex::new(None), }, }) } diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index b23d5028d0..e75d319239 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -1,5 +1,5 @@ use glow::HasContext; -use parking_lot::{Mutex, MutexGuard}; +use 
parking_lot::{Mutex, MutexGuard, RwLock}; use std::{ffi, os::raw, ptr, sync::Arc, time::Duration}; @@ -882,7 +882,7 @@ impl crate::Instance for Instance { config: inner.config, presentable: inner.supports_native_window, raw_window_handle: window_handle, - swapchain: None, + swapchain: RwLock::new(None), srgb_kind: inner.srgb_kind, }) } @@ -980,7 +980,7 @@ pub struct Surface { config: khronos_egl::Config, pub(super) presentable: bool, raw_window_handle: raw_window_handle::RawWindowHandle, - swapchain: Option, + swapchain: RwLock>, srgb_kind: SrgbFrameBufferKind, } @@ -989,11 +989,12 @@ unsafe impl Sync for Surface {} impl Surface { pub(super) unsafe fn present( - &mut self, + &self, _suf_texture: super::Texture, gl: &glow::Context, ) -> Result<(), crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); self.egl .instance @@ -1051,11 +1052,11 @@ impl Surface { } unsafe fn unconfigure_impl( - &mut self, + &self, device: &super::Device, ) -> Option<(khronos_egl::Surface, Option<*mut raw::c_void>)> { let gl = &device.shared.context.lock(); - match self.swapchain.take() { + match self.swapchain.write().take() { Some(sc) => { unsafe { gl.delete_renderbuffer(sc.renderbuffer) }; unsafe { gl.delete_framebuffer(sc.framebuffer) }; @@ -1075,7 +1076,7 @@ impl Surface { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -1251,7 +1252,8 @@ impl crate::Surface for Surface { unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; - self.swapchain = Some(Swapchain { + let mut swapchain = self.swapchain.write(); + *swapchain = Some(Swapchain { surface, wl_window, renderbuffer, @@ -1265,7 +1267,7 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: &super::Device) { + unsafe fn unconfigure(&self, device: &super::Device) { if let Some((surface, wl_window)) = unsafe { self.unconfigure_impl(device) } { self.egl .instance @@ -1281,10 +1283,11 @@ impl crate::Surface for Surface { } unsafe fn acquire_texture( - &mut self, + &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); let texture = super::Texture { inner: super::TextureInner::Renderbuffer { raw: sc.renderbuffer, @@ -1306,5 +1309,5 @@ impl crate::Surface for Surface { suboptimal: false, })) } - unsafe fn discard_texture(&mut self, _texture: super::Texture) {} + unsafe fn discard_texture(&self, _texture: super::Texture) {} } diff --git a/wgpu-hal/src/gles/mod.rs b/wgpu-hal/src/gles/mod.rs index d196b8bc46..2509360317 100644 --- a/wgpu-hal/src/gles/mod.rs +++ b/wgpu-hal/src/gles/mod.rs @@ -88,7 +88,7 @@ use glow::HasContext; use naga::FastHashMap; use parking_lot::Mutex; -use std::sync::atomic::AtomicU32; +use std::sync::atomic::{AtomicU32, AtomicU8}; use std::{fmt, ops::Range, sync::Arc}; #[derive(Clone)] @@ -230,9 +230,9 @@ pub struct Queue { /// Keep a reasonably large buffer filled with zeroes, so that we can implement `ClearBuffer` of /// zeroes by copying from it. 
zero_buffer: glow::Buffer, - temp_query_results: Vec, - draw_buffer_count: u8, - current_index_buffer: Option, + temp_query_results: Mutex>, + draw_buffer_count: AtomicU8, + current_index_buffer: Mutex>, } #[derive(Clone, Debug)] @@ -352,10 +352,12 @@ pub struct Sampler { raw: glow::Sampler, } +#[derive(Debug)] pub struct BindGroupLayout { entries: Arc<[wgt::BindGroupLayoutEntry]>, } +#[derive(Debug)] struct BindGroupLayoutInfo { entries: Arc<[wgt::BindGroupLayoutEntry]>, /// Mapping of resources, indexed by `binding`, into the whole layout space. @@ -366,6 +368,7 @@ struct BindGroupLayoutInfo { binding_to_slot: Box<[u8]>, } +#[derive(Debug)] pub struct PipelineLayout { group_infos: Box<[BindGroupLayoutInfo]>, naga_options: naga::back::glsl::Options, @@ -470,6 +473,7 @@ unsafe impl Send for UniformDesc {} /// sampler (in this layout) that the texture is used with. type SamplerBindMap = [Option; MAX_TEXTURE_SLOTS]; +#[derive(Debug)] struct PipelineInner { program: glow::Program, sampler_map: SamplerBindMap, @@ -516,6 +520,7 @@ struct ProgramCacheKey { type ProgramCache = FastHashMap, crate::PipelineError>>; +#[derive(Debug)] pub struct RenderPipeline { inner: Arc, primitive: wgt::PrimitiveState, @@ -534,6 +539,7 @@ unsafe impl Send for RenderPipeline {} #[cfg(target_arch = "wasm32")] unsafe impl Sync for RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline { inner: Arc, } @@ -623,7 +629,7 @@ impl Default for StencilSide { } } -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default)] struct StencilState { front: StencilSide, back: StencilSide, diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 5dd8b1f554..592149355b 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -1,7 +1,10 @@ use super::Command as C; use arrayvec::ArrayVec; use glow::HasContext; -use std::{mem, slice, sync::Arc}; +use std::{ + mem, slice, + sync::{atomic::Ordering, Arc}, +}; #[cfg(not(target_arch = "wasm32"))] const DEBUG_ID: u32 = 0; @@ -48,20 +51,21 @@ impl super::Queue { unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) }; unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; - if self.draw_buffer_count != 0 { + let draw_buffer_count = self.draw_buffer_count.load(Ordering::Relaxed); + if draw_buffer_count != 0 { // Reset the draw buffers to what they were before the clear - let indices = (0..self.draw_buffer_count as u32) + let indices = (0..draw_buffer_count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); unsafe { gl.draw_buffers(&indices) }; } #[cfg(not(target_arch = "wasm32"))] - for draw_buffer in 0..self.draw_buffer_count as u32 { + for draw_buffer in 0..draw_buffer_count as u32 { unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; } } - unsafe fn reset_state(&mut self, gl: &glow::Context) { + unsafe fn reset_state(&self, gl: &glow::Context) { unsafe { gl.use_program(None) }; unsafe { gl.bind_framebuffer(glow::FRAMEBUFFER, None) }; unsafe { gl.disable(glow::DEPTH_TEST) }; @@ -76,7 +80,8 @@ impl super::Queue { } unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None) }; - self.current_index_buffer = None; + let mut current_index_buffer = self.current_index_buffer.lock(); + *current_index_buffer = None; } unsafe fn set_attachment( @@ -152,7 +157,7 @@ impl super::Queue { } unsafe fn process( - &mut self, + &self, gl: &glow::Context, command: &C, #[cfg_attr(target_arch = "wasm32", allow(unused))] data_bytes: &[u8], @@ -361,7 +366,10 @@ impl super::Queue { unsafe { gl.bind_buffer(copy_src_target, None) }; if 
is_index_buffer_only_element_dst { unsafe { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer) + gl.bind_buffer( + glow::ELEMENT_ARRAY_BUFFER, + *self.current_index_buffer.lock(), + ) }; } else { unsafe { gl.bind_buffer(copy_dst_target, None) }; @@ -889,7 +897,8 @@ impl super::Queue { } C::SetIndexBuffer(buffer) => { unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)) }; - self.current_index_buffer = Some(buffer); + let mut current_index_buffer = self.current_index_buffer.lock(); + *current_index_buffer = Some(buffer); } C::BeginQuery(query, target) => { unsafe { gl.begin_query(target, query) }; @@ -903,15 +912,16 @@ impl super::Queue { dst_target, dst_offset, } => { - self.temp_query_results.clear(); + let mut temp_query_results = self.temp_query_results.lock(); + temp_query_results.clear(); for &query in queries[query_range.start as usize..query_range.end as usize].iter() { let result = unsafe { gl.get_query_parameter_u32(query, glow::QUERY_RESULT) }; - self.temp_query_results.push(result as u64); + temp_query_results.push(result as u64); } let query_data = unsafe { slice::from_raw_parts( - self.temp_query_results.as_ptr() as *const u8, - self.temp_query_results.len() * mem::size_of::(), + temp_query_results.as_ptr() as *const u8, + temp_query_results.len() * mem::size_of::(), ) }; match dst.raw { @@ -1000,7 +1010,7 @@ impl super::Queue { unsafe { gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list) }; } C::SetDrawColorBuffers(count) => { - self.draw_buffer_count = count; + self.draw_buffer_count.store(count, Ordering::Relaxed); let indices = (0..count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); @@ -1504,7 +1514,7 @@ impl super::Queue { impl crate::Queue for super::Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&super::CommandBuffer], signal_fence: Option<(&mut super::Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -1542,8 +1552,8 @@ impl crate::Queue for super::Queue { } unsafe fn present( - &mut self, - surface: &mut super::Surface, + &self, + surface: &super::Surface, texture: super::Texture, ) -> Result<(), crate::SurfaceError> { #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] diff --git a/wgpu-hal/src/gles/web.rs b/wgpu-hal/src/gles/web.rs index 01c37ec5f9..d7ecfa6932 100644 --- a/wgpu-hal/src/gles/web.rs +++ b/wgpu-hal/src/gles/web.rs @@ -86,7 +86,7 @@ impl Instance { Ok(Surface { webgl2_context, srgb_present_program: None, - swapchain: None, + swapchain: RwLock::new(None), texture: None, presentable: true, }) @@ -162,7 +162,7 @@ impl crate::Instance for Instance { #[derive(Clone, Debug)] pub struct Surface { webgl2_context: web_sys::WebGl2RenderingContext, - pub(super) swapchain: Option, + pub(super) swapchain: RwLock>, texture: Option, pub(super) presentable: bool, srgb_present_program: Option, @@ -183,13 +183,17 @@ pub struct Swapchain { impl Surface { pub(super) unsafe fn present( - &mut self, + &self, _suf_texture: super::Texture, gl: &glow::Context, ) -> Result<(), crate::SurfaceError> { - let swapchain = self.swapchain.as_ref().ok_or(crate::SurfaceError::Other( - "need to configure surface before presenting", - ))?; + let swapchain = self + .swapchain + .read() + .as_ref() + .ok_or(crate::SurfaceError::Other( + "need to configure surface before presenting", + ))?; if swapchain.format.is_srgb() { // Important to set the viewport since we don't know in what state the user left it. 
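
The GLES queue hunks above replace `&mut self` field updates with interior mutability: a `Mutex` for the scratch buffers and an atomic for the draw-buffer count, loaded and stored with `Ordering::Relaxed`. A reduced sketch of the same pattern on a hypothetical queue type:

use std::sync::{
    atomic::{AtomicU8, Ordering},
    Mutex,
};

// Hypothetical queue; the real one also holds GL handles and programs.
struct Queue {
    draw_buffer_count: AtomicU8,
    temp_query_results: Mutex<Vec<u64>>,
}

impl Queue {
    fn new() -> Self {
        Self {
            draw_buffer_count: AtomicU8::new(1),
            temp_query_results: Mutex::new(Vec::new()),
        }
    }

    // Methods can now take `&self`; the fields synchronize themselves.
    fn set_draw_color_buffers(&self, count: u8) {
        self.draw_buffer_count.store(count, Ordering::Relaxed);
    }

    fn copy_query_results(&self, raw_results: &[u32]) -> usize {
        let mut temp = self.temp_query_results.lock().unwrap();
        temp.clear();
        temp.extend(raw_results.iter().map(|&r| r as u64));
        temp.len()
    }
}

fn main() {
    let queue = Queue::new();
    queue.set_draw_color_buffers(3);
    assert_eq!(queue.draw_buffer_count.load(Ordering::Relaxed), 3);
    assert_eq!(queue.copy_query_results(&[7, 9]), 2);
}
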
@@ -266,13 +270,13 @@ impl Surface { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { let gl = &device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.take() { + if let Some(swapchain) = self.swapchain.write().take() { // delete all frame buffers already allocated unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } @@ -332,7 +336,8 @@ impl crate::Surface for Surface { }; unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; - self.swapchain = Some(Swapchain { + let mut swapchain = self.swapchain.write(); + *swapchain = Some(Swapchain { extent: config.extent, // channel: config.format.base_format().1, format: config.format, @@ -342,9 +347,9 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: &super::Device) { + unsafe fn unconfigure(&self, device: &super::Device) { let gl = device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.take() { + if let Some(swapchain) = self.swapchain.write().take() { unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; } if let Some(renderbuffer) = self.texture.take() { @@ -353,10 +358,10 @@ impl crate::Surface for Surface { } unsafe fn acquire_texture( - &mut self, + &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let sc = self.swapchain.read().as_ref().unwrap(); let texture = super::Texture { inner: super::TextureInner::Texture { raw: self.texture.unwrap(), @@ -380,5 +385,5 @@ impl crate::Surface for Surface { })) } - unsafe fn discard_texture(&mut self, _texture: super::Texture) {} + unsafe fn discard_texture(&self, _texture: super::Texture) {} } diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index adb5fdc773..d383bca01e 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -87,7 +87,7 @@ pub mod api { use std::{ borrow::{Borrow, Cow}, fmt, - num::NonZeroU32, + num::{NonZeroU32}, ops::{Range, RangeInclusive}, ptr::NonNull, sync::atomic::AtomicBool, @@ -161,7 +161,7 @@ pub trait Api: Clone + Sized { type Queue: Queue; type CommandEncoder: CommandEncoder; - type CommandBuffer: Send + Sync + fmt::Debug; + type CommandBuffer: fmt::Debug + Send + Sync; type Buffer: fmt::Debug + Send + Sync + 'static; type Texture: fmt::Debug + Send + Sync + 'static; @@ -171,12 +171,12 @@ pub trait Api: Clone + Sized { type QuerySet: fmt::Debug + Send + Sync; type Fence: fmt::Debug + Send + Sync; - type BindGroupLayout: Send + Sync; + type BindGroupLayout: fmt::Debug + Send + Sync; type BindGroup: fmt::Debug + Send + Sync; - type PipelineLayout: Send + Sync; + type PipelineLayout: fmt::Debug + Send + Sync; type ShaderModule: fmt::Debug + Send + Sync; - type RenderPipeline: Send + Sync; - type ComputePipeline: Send + Sync; + type RenderPipeline: fmt::Debug + Send + Sync; + type ComputePipeline: fmt::Debug + Send + Sync; } pub trait Instance: Sized + Send + Sync { @@ -192,12 +192,12 @@ pub trait Instance: Sized + Send + Sync { pub trait Surface: Send + Sync { unsafe fn configure( - &mut self, + &self, device: &A::Device, config: &SurfaceConfiguration, ) -> Result<(), SurfaceError>; - unsafe fn unconfigure(&mut self, device: &A::Device); + unsafe fn unconfigure(&self, device: &A::Device); /// Returns the next texture to be presented by the swapchain for drawing /// @@ -210,10 +210,10 @@ pub trait Surface: Send + Sync { /// /// Returns `None` on timing out. 
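
Tightening the `Api` associated-type bounds to `fmt::Debug + Send + Sync`, as in the trait above, is what forces the `#[derive(Debug)]` additions on the backend pipeline and layout types earlier in this patch. A tiny sketch of that relationship with hypothetical types:

use std::fmt;

// The bound on the associated type is what every backend must satisfy.
trait Api {
    type RenderPipeline: fmt::Debug + Send + Sync;
}

// Without this derive, `impl Api for Backend` below would not compile.
#[derive(Debug)]
struct RenderPipeline {
    vertex_count: u32,
}

struct Backend;

impl Api for Backend {
    type RenderPipeline = RenderPipeline;
}

fn main() {
    let pipeline: <Backend as Api>::RenderPipeline = RenderPipeline { vertex_count: 3 };
    // The Debug bound lets generic code log any backend's pipeline.
    println!("{pipeline:?}");
}
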
unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, SurfaceError>; - unsafe fn discard_texture(&mut self, texture: A::SurfaceTexture); + unsafe fn discard_texture(&self, texture: A::SurfaceTexture); } pub trait Adapter: Send + Sync { @@ -344,13 +344,13 @@ pub trait Queue: Send + Sync { /// that are associated with this queue. /// - all of the command buffers had `CommadBuffer::finish()` called. unsafe fn submit( - &mut self, + &self, command_buffers: &[&A::CommandBuffer], signal_fence: Option<(&mut A::Fence, FenceValue)>, ) -> Result<(), DeviceError>; unsafe fn present( - &mut self, - surface: &mut A::Surface, + &self, + surface: &A::Surface, texture: A::SurfaceTexture, ) -> Result<(), SurfaceError>; unsafe fn get_timestamp_period(&self) -> f32; diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index b757b4101d..bae68cc306 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -377,7 +377,7 @@ impl crate::Queue for Queue { } unsafe fn present( &mut self, - _surface: &mut Surface, + _surface: &Surface, texture: SurfaceTexture, ) -> Result<(), crate::SurfaceError> { let queue = &self.raw.lock(); diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 8101d52969..ebfa79e8eb 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -171,7 +171,7 @@ impl super::Surface { impl crate::Surface for super::Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -233,12 +233,12 @@ impl crate::Surface for super::Surface { Ok(()) } - unsafe fn unconfigure(&mut self, _device: &super::Device) { + unsafe fn unconfigure(&self, _device: &super::Device) { self.swapchain_format = None; } unsafe fn acquire_texture( - &mut self, + &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { let render_layer = self.render_layer.lock(); @@ -274,5 +274,5 @@ impl crate::Surface for super::Surface { })) } - unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {} + unsafe fn discard_texture(&self, _texture: super::SurfaceTexture) {} } diff --git a/wgpu-hal/src/vulkan/adapter.rs b/wgpu-hal/src/vulkan/adapter.rs index ab07b7f854..67fa7f1fa4 100644 --- a/wgpu-hal/src/vulkan/adapter.rs +++ b/wgpu-hal/src/vulkan/adapter.rs @@ -3,7 +3,11 @@ use super::conv; use ash::{extensions::khr, vk}; use parking_lot::Mutex; -use std::{collections::BTreeMap, ffi::CStr, sync::Arc}; +use std::{ + collections::BTreeMap, + ffi::CStr, + sync::{atomic::AtomicIsize, Arc}, +}; fn depth_stencil_required_flags() -> vk::FormatFeatureFlags { vk::FormatFeatureFlags::SAMPLED_IMAGE | vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT @@ -1339,7 +1343,7 @@ impl super::Adapter { device: Arc::clone(&shared), family_index, relay_semaphores, - relay_index: None, + relay_index: AtomicIsize::new(-1), }; let mem_allocator = { diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs index 09b887772c..ad0fa192a1 100644 --- a/wgpu-hal/src/vulkan/device.rs +++ b/wgpu-hal/src/vulkan/device.rs @@ -539,7 +539,7 @@ struct CompiledStage { impl super::Device { pub(super) unsafe fn create_swapchain( &self, - surface: &mut super::Surface, + surface: &super::Surface, config: &crate::SurfaceConfiguration, provided_old_swapchain: Option, ) -> Result { diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index 328795a13b..c961bf0ed9 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ 
b/wgpu-hal/src/vulkan/instance.rs @@ -9,6 +9,7 @@ use ash::{ extensions::{ext, khr}, vk, }; +use parking_lot::RwLock; unsafe extern "system" fn debug_utils_messenger_callback( message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, @@ -465,7 +466,7 @@ impl super::Instance { raw: surface, functor, instance: Arc::clone(&self.shared), - swapchain: None, + swapchain: RwLock::new(None), } } } @@ -707,33 +708,34 @@ impl crate::Instance for super::Instance { impl crate::Surface for super::Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { - let old = self - .swapchain + let mut swap_chain = self.swapchain.write(); + let old = swap_chain .take() .map(|sc| unsafe { sc.release_resources(&device.shared.raw) }); let swapchain = unsafe { device.create_swapchain(self, config, old)? }; - self.swapchain = Some(swapchain); + *swap_chain = Some(swapchain); Ok(()) } - unsafe fn unconfigure(&mut self, device: &super::Device) { - if let Some(sc) = self.swapchain.take() { + unsafe fn unconfigure(&self, device: &super::Device) { + if let Some(sc) = self.swapchain.write().take() { let swapchain = unsafe { sc.release_resources(&device.shared.raw) }; unsafe { swapchain.functor.destroy_swapchain(swapchain.raw, None) }; } } unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.as_mut().unwrap(); + let mut swapchain = self.swapchain.write(); + let sc = swapchain.as_mut().unwrap(); let mut timeout_ns = match timeout { Some(duration) => duration.as_nanos() as u64, @@ -820,5 +822,5 @@ impl crate::Surface for super::Surface { })) } - unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {} + unsafe fn discard_texture(&self, _texture: super::SurfaceTexture) {} } diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs index fdee547973..ba38c51796 100644 --- a/wgpu-hal/src/vulkan/mod.rs +++ b/wgpu-hal/src/vulkan/mod.rs @@ -31,14 +31,23 @@ mod conv; mod device; mod instance; -use std::{borrow::Borrow, ffi::CStr, fmt, num::NonZeroU32, sync::Arc}; +use std::{ + borrow::Borrow, + ffi::CStr, + fmt, + num::NonZeroU32, + sync::{ + atomic::{AtomicIsize, Ordering}, + Arc, + }, +}; use arrayvec::ArrayVec; use ash::{ extensions::{ext, khr}, vk, }; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; const MILLIS_TO_NANOS: u64 = 1_000_000; const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1; @@ -109,7 +118,7 @@ pub struct Surface { raw: vk::SurfaceKHR, functor: khr::Surface, instance: Arc, - swapchain: Option, + swapchain: RwLock>, } #[derive(Debug)] @@ -277,7 +286,7 @@ pub struct Queue { /// It would be correct to use a single semaphore there, but /// [Intel hangs in `anv_queue_finish`](https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508). 
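
In the Vulkan queue below, `relay_index` changes from an `Option` mutated through `&mut self` to an `AtomicIsize`, with `-1` standing in for `None` so `submit` and `present` can take `&self`. A hedged sketch of that encoding (hypothetical helper, not the wgpu-hal code):

use std::sync::atomic::{AtomicIsize, Ordering};

// -1 encodes "no semaphore index recorded yet", i.e. `None`.
struct RelayIndex(AtomicIsize);

impl RelayIndex {
    fn new() -> Self {
        Self(AtomicIsize::new(-1))
    }

    // Record the next index to wait on, as `submit` does after queueing work.
    fn store(&self, index: usize) {
        self.0.store(index as isize, Ordering::Relaxed);
    }

    // Atomically read and clear the index, as `present` does; returns the
    // previous value if one was set.
    fn take(&self) -> Option<usize> {
        let old = self.0.swap(-1, Ordering::Relaxed);
        (old >= 0).then_some(old as usize)
    }
}

fn main() {
    let relay = RelayIndex::new();
    assert_eq!(relay.take(), None);
    relay.store(1);
    assert_eq!(relay.take(), Some(1));
    // Taking twice yields None the second time, like `Option::take`.
    assert_eq!(relay.take(), None);
}
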
relay_semaphores: [vk::Semaphore; 2], - relay_index: Option, + relay_index: AtomicIsize, } #[derive(Debug)] @@ -493,7 +502,7 @@ impl Fence { impl crate::Queue for Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&CommandBuffer], signal_fence: Option<(&mut Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -538,16 +547,17 @@ impl crate::Queue for Queue { } let wait_stage_mask = [vk::PipelineStageFlags::TOP_OF_PIPE]; - let sem_index = match self.relay_index { - Some(old_index) => { - vk_info = vk_info - .wait_semaphores(&self.relay_semaphores[old_index..old_index + 1]) - .wait_dst_stage_mask(&wait_stage_mask); - (old_index + 1) % self.relay_semaphores.len() - } - None => 0, + let old_index = self.relay_index.load(Ordering::Relaxed); + let sem_index = if old_index >= 0 { + vk_info = vk_info + .wait_semaphores(&self.relay_semaphores[old_index as usize..old_index as usize + 1]) + .wait_dst_stage_mask(&wait_stage_mask); + (old_index as usize + 1) % self.relay_semaphores.len() + } else { + 0 }; - self.relay_index = Some(sem_index); + self.relay_index + .store(sem_index as isize, Ordering::Relaxed); signal_semaphores[0] = self.relay_semaphores[sem_index]; let signal_count = if signal_semaphores[1] == vk::Semaphore::null() { @@ -567,11 +577,12 @@ impl crate::Queue for Queue { } unsafe fn present( - &mut self, - surface: &mut Surface, + &self, + surface: &Surface, texture: SurfaceTexture, ) -> Result<(), crate::SurfaceError> { - let ssc = surface.swapchain.as_ref().unwrap(); + let mut swapchain = surface.swapchain.write(); + let ssc = swapchain.as_mut().unwrap(); let swapchains = [ssc.raw]; let image_indices = [texture.index]; @@ -579,8 +590,11 @@ impl crate::Queue for Queue { .swapchains(&swapchains) .image_indices(&image_indices); - if let Some(old_index) = self.relay_index.take() { - vk_info = vk_info.wait_semaphores(&self.relay_semaphores[old_index..old_index + 1]); + let old_index = self.relay_index.swap(-1, Ordering::Relaxed); + if old_index >= 0 { + vk_info = vk_info.wait_semaphores( + &self.relay_semaphores[old_index as usize..old_index as usize + 1], + ); } let suboptimal = { diff --git a/wgpu-types/LICENSE.APACHE b/wgpu-types/LICENSE.APACHE deleted file mode 120000 index 7141cad5b2..0000000000 --- a/wgpu-types/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-types/LICENSE.APACHE b/wgpu-types/LICENSE.APACHE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/wgpu-types/LICENSE.APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/wgpu-types/LICENSE.MIT b/wgpu-types/LICENSE.MIT deleted file mode 120000 index 6b8772d1a7..0000000000 --- a/wgpu-types/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-types/LICENSE.MIT b/wgpu-types/LICENSE.MIT new file mode 100644 index 0000000000..4699691b8e --- /dev/null +++ b/wgpu-types/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 The gfx-rs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
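
For reference, the Vulkan queue change earlier in this patch replaces the Option-typed `relay_index` with an `AtomicIsize` (where -1 stands for "no pending relay semaphore") so that `Queue::submit` and `Queue::present` can take `&self` instead of `&mut self`. What follows is only a minimal, self-contained sketch of that pattern, using hypothetical names and plain std atomics in place of the real wgpu-hal/ash types; it is not the crate's actual API:

    use std::sync::atomic::{AtomicIsize, Ordering};

    /// Illustrative stand-in for the queue's semaphore-relay state; the real
    /// wgpu-hal types differ, this only demonstrates the `&self` + atomic pattern.
    struct RelayState {
        semaphores: [u64; 2],     // placeholder for the two relay semaphore handles
        relay_index: AtomicIsize, // -1 means "no semaphore to wait on yet"
    }

    impl RelayState {
        fn new() -> Self {
            Self {
                semaphores: [1, 2],
                relay_index: AtomicIsize::new(-1),
            }
        }

        /// Mirrors the submit path: wait on the previously signaled semaphore
        /// if there is one, then signal the next one and remember its index.
        fn submit(&self) -> u64 {
            let old = self.relay_index.load(Ordering::Relaxed);
            let next = if old >= 0 {
                // a real implementation would add self.semaphores[old as usize]
                // to this submission's wait-semaphore list here
                (old as usize + 1) % self.semaphores.len()
            } else {
                0
            };
            self.relay_index.store(next as isize, Ordering::Relaxed);
            self.semaphores[next] // the semaphore this submission signals
        }

        /// Mirrors the present path: consume the pending semaphore, if any.
        fn present(&self) -> Option<u64> {
            let old = self.relay_index.swap(-1, Ordering::Relaxed);
            (old >= 0).then(|| self.semaphores[old as usize])
        }
    }

    fn main() {
        let state = RelayState::new();
        assert_eq!(state.present(), None); // nothing submitted yet
        let signaled = state.submit();     // first submit signals semaphore 0
        assert_eq!(state.present(), Some(signaled));
    }

The diff above uses `Ordering::Relaxed` as well, presumably relying on the external synchronization Vulkan already requires around queue submission and present rather than on the atomic for cross-thread ordering.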
diff --git a/wgpu/LICENSE.APACHE b/wgpu/LICENSE.APACHE deleted file mode 120000 index 7141cad5b2..0000000000 --- a/wgpu/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu/LICENSE.APACHE b/wgpu/LICENSE.APACHE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/wgpu/LICENSE.APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/wgpu/LICENSE.MIT b/wgpu/LICENSE.MIT deleted file mode 120000 index 6b8772d1a7..0000000000 --- a/wgpu/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu/LICENSE.MIT b/wgpu/LICENSE.MIT new file mode 100644 index 0000000000..4699691b8e --- /dev/null +++ b/wgpu/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 The gfx-rs developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 1b688b14a9..d0bd951e1f 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -26,7 +26,7 @@ use wgc::id::TypedId; const LABEL: &str = "label"; -pub struct Context(wgc::hub::Global); +pub struct Context(wgc::global::Global); impl Drop for Context { fn drop(&mut self) { @@ -41,11 +41,11 @@ impl fmt::Debug for Context { } impl Context { - pub unsafe fn from_hal_instance(hal_instance: A::Instance) -> Self { + pub unsafe fn from_hal_instance(hal_instance: A::Instance) -> Self { Self(unsafe { - wgc::hub::Global::from_hal_instance::( + wgc::global::Global::from_hal_instance::( "wgpu", - wgc::hub::IdentityManagerFactory, + wgc::identity::IdentityManagerFactory, hal_instance, ) }) @@ -54,17 +54,17 @@ impl Context { /// # Safety /// /// - The raw instance handle returned must not be manually destroyed. 
- pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { + pub unsafe fn instance_as_hal(&self) -> Option<&A::Instance> { unsafe { self.0.instance_as_hal::() } } pub unsafe fn from_core_instance(core_instance: wgc::instance::Instance) -> Self { Self(unsafe { - wgc::hub::Global::from_instance(wgc::hub::IdentityManagerFactory, core_instance) + wgc::global::Global::from_instance(wgc::identity::IdentityManagerFactory, core_instance) }) } - pub(crate) fn global(&self) -> &wgc::hub::Global { + pub(crate) fn global(&self) -> &wgc::global::Global { &self.0 } @@ -73,14 +73,18 @@ impl Context { .enumerate_adapters(wgc::instance::AdapterInputs::Mask(backends, |_| ())) } - pub unsafe fn create_adapter_from_hal( + pub unsafe fn create_adapter_from_hal( &self, hal_adapter: hal::ExposedAdapter, ) -> wgc::id::AdapterId { unsafe { self.0.create_adapter_from_hal(hal_adapter, ()) } } - pub unsafe fn adapter_as_hal) -> R, R>( + pub unsafe fn adapter_as_hal< + A: wgc::hal_api::HalApi, + F: FnOnce(Option<&A::Adapter>) -> R, + R, + >( &self, adapter: wgc::id::AdapterId, hal_adapter_callback: F, @@ -91,7 +95,7 @@ impl Context { } } - pub unsafe fn create_device_from_hal( + pub unsafe fn create_device_from_hal( &self, adapter: &wgc::id::AdapterId, hal_device: hal::OpenDevice, @@ -124,7 +128,7 @@ impl Context { Ok((device, queue)) } - pub unsafe fn create_texture_from_hal( + pub unsafe fn create_texture_from_hal( &self, hal_texture: A::Texture, device: &Device, @@ -149,7 +153,7 @@ impl Context { } } - pub unsafe fn device_as_hal) -> R, R>( + pub unsafe fn device_as_hal) -> R, R>( &self, device: &Device, hal_device_callback: F, @@ -160,9 +164,9 @@ impl Context { } } - pub unsafe fn surface_as_hal_mut< - A: wgc::hub::HalApi, - F: FnOnce(Option<&mut A::Surface>) -> R, + pub unsafe fn surface_as_hal< + A: wgc::hal_api::HalApi, + F: FnOnce(Option<&A::Surface>) -> R, R, >( &self, @@ -171,11 +175,11 @@ impl Context { ) -> R { unsafe { self.0 - .surface_as_hal_mut::(surface.id, hal_surface_callback) + .surface_as_hal::(surface.id, hal_surface_callback) } } - pub unsafe fn texture_as_hal)>( + pub unsafe fn texture_as_hal)>( &self, texture: &Texture, hal_texture_callback: F, @@ -186,7 +190,7 @@ impl Context { } } - pub fn generate_report(&self) -> wgc::hub::GlobalReport { + pub fn generate_report(&self) -> wgc::global::GlobalReport { self.0.generate_report() } @@ -536,9 +540,9 @@ impl crate::Context for Context { type PopErrorScopeFuture = Ready>; fn init(instance_desc: wgt::InstanceDescriptor) -> Self { - Self(wgc::hub::Global::new( + Self(wgc::global::Global::new( "wgpu", - wgc::hub::IdentityManagerFactory, + wgc::identity::IdentityManagerFactory, instance_desc, )) } @@ -1273,7 +1277,7 @@ impl crate::Context for Context { let (id, error) = wgc::gfx_select!(device => global.device_create_texture( *device, &wgt_desc, - () + (), () )); if let Some(cause) = error { self.handle_error( diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index e9c700053c..981c683f6a 100644 --- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -1346,7 +1346,7 @@ impl Instance { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn from_hal(hal_instance: A::Instance) -> Self { + pub unsafe fn from_hal(hal_instance: A::Instance) -> Self { Self { context: Arc::new(unsafe { crate::backend::Context::from_hal_instance::(hal_instance) @@ -1369,7 +1369,7 @@ impl Instance { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn as_hal(&self) -> Option<&A::Instance> { + pub unsafe fn as_hal(&self) -> Option<&A::Instance> { unsafe { 
self.context .as_any() @@ -1454,7 +1454,7 @@ impl Instance { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn create_adapter_from_hal( + pub unsafe fn create_adapter_from_hal( &self, hal_adapter: hal::ExposedAdapter, ) -> Adapter { @@ -1684,7 +1684,7 @@ impl Instance { target_os = "emscripten", feature = "webgl" ))] - pub fn generate_report(&self) -> wgc::hub::GlobalReport { + pub fn generate_report(&self) -> wgc::global::GlobalReport { self.context .as_any() .downcast_ref::() @@ -1759,7 +1759,7 @@ impl Adapter { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn create_device_from_hal( + pub unsafe fn create_device_from_hal( &self, hal_device: hal::OpenDevice, desc: &DeviceDescriptor, @@ -1813,7 +1813,7 @@ impl Adapter { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn as_hal) -> R, R>( + pub unsafe fn as_hal) -> R, R>( &self, hal_adapter_callback: F, ) -> R { @@ -2162,7 +2162,7 @@ impl Device { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn create_texture_from_hal( + pub unsafe fn create_texture_from_hal( &self, hal_texture: A::Texture, desc: &TextureDescriptor, @@ -2267,7 +2267,7 @@ impl Device { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn as_hal) -> R, R>( + pub unsafe fn as_hal) -> R, R>( &self, hal_device_callback: F, ) -> R { @@ -2609,7 +2609,7 @@ impl Texture { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn as_hal)>( + pub unsafe fn as_hal)>( &self, hal_texture_callback: F, ) { @@ -4222,7 +4222,7 @@ impl Surface { target_os = "emscripten", feature = "webgl" ))] - pub unsafe fn as_hal_mut) -> R, R>( + pub unsafe fn as_hal) -> R, R>( &mut self, hal_surface_callback: F, ) -> R { @@ -4231,10 +4231,7 @@ impl Surface { .as_any() .downcast_ref::() .unwrap() - .surface_as_hal_mut::( - self.data.downcast_ref().unwrap(), - hal_surface_callback, - ) + .surface_as_hal::(self.data.downcast_ref().unwrap(), hal_surface_callback) } } } @@ -4251,10 +4248,9 @@ impl Adapter { /// Returns a globally-unique identifier for this `Adapter`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Adapter`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4264,10 +4260,9 @@ impl Device { /// Returns a globally-unique identifier for this `Device`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Device`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4277,10 +4272,9 @@ impl Queue { /// Returns a globally-unique identifier for this `Queue`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Queue`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. 
#[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4290,10 +4284,9 @@ impl ShaderModule { /// Returns a globally-unique identifier for this `ShaderModule`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `ShaderModule`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4303,10 +4296,9 @@ impl BindGroupLayout { /// Returns a globally-unique identifier for this `BindGroupLayout`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `BindGroupLayout`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4316,10 +4308,9 @@ impl BindGroup { /// Returns a globally-unique identifier for this `BindGroup`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `BindGroup`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4329,10 +4320,9 @@ impl TextureView { /// Returns a globally-unique identifier for this `TextureView`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `TextureView`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4342,10 +4332,9 @@ impl Sampler { /// Returns a globally-unique identifier for this `Sampler`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Sampler`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4355,10 +4344,9 @@ impl Buffer { /// Returns a globally-unique identifier for this `Buffer`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Buffer`s created from the same - /// `Instance`. 
+ /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4368,10 +4356,9 @@ impl Texture { /// Returns a globally-unique identifier for this `Texture`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Texture`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4381,10 +4368,9 @@ impl QuerySet { /// Returns a globally-unique identifier for this `QuerySet`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `QuerySet`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4394,10 +4380,9 @@ impl PipelineLayout { /// Returns a globally-unique identifier for this `PipelineLayout`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `PipelineLayout`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4407,10 +4392,9 @@ impl RenderPipeline { /// Returns a globally-unique identifier for this `RenderPipeline`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `RenderPipeline`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4420,10 +4404,9 @@ impl ComputePipeline { /// Returns a globally-unique identifier for this `ComputePipeline`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `ComputePipeline`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4433,10 +4416,9 @@ impl RenderBundle { /// Returns a globally-unique identifier for this `RenderBundle`. /// /// Calling this method multiple times on the same object will always return the same value. 
- /// The returned value is guaranteed to be unique among all `RenderBundle`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } @@ -4446,10 +4428,9 @@ impl Surface { /// Returns a globally-unique identifier for this `Surface`. /// /// Calling this method multiple times on the same object will always return the same value. - /// The returned value is guaranteed to be unique among all `Surface`s created from the same - /// `Instance`. + /// The returned value is guaranteed to be different for all resources created from the same `Instance`. #[cfg_attr(docsrs, doc(cfg(feature = "expose-ids")))] - pub fn global_id(&self) -> Id { + pub fn global_id(&self) -> Id { Id(self.id.global_id(), std::marker::PhantomData) } } From d6857262f2eeffe94e059522cf77b79dc3cf7062 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Thu, 30 Mar 2023 20:04:20 +0200 Subject: [PATCH 002/132] Keeping Arc only outside and not inside resource --- wgpu-core/src/binding_model.rs | 23 +++++---------- wgpu-core/src/command/bundle.rs | 4 +-- wgpu-core/src/command/render.rs | 10 +++---- wgpu-core/src/device/device.rs | 48 +++++++++++++++--------------- wgpu-core/src/device/global.rs | 6 ++-- wgpu-core/src/pipeline.rs | 33 ++++++++------------- wgpu-core/src/resource.rs | 52 +++++++++++---------------------- 7 files changed, 70 insertions(+), 106 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index fe63d7081e..8f6717be73 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -444,7 +444,7 @@ pub(crate) type BindEntryMap = FastHashMap; /// - pipelines with implicit layouts #[derive(Debug)] pub struct BindGroupLayout { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) entries: BindEntryMap, pub(crate) count_validator: BindingTypeMaxCountValidator, @@ -455,8 +455,7 @@ pub struct BindGroupLayout { impl Drop for BindGroupLayout { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device @@ -465,8 +464,6 @@ impl Drop for BindGroupLayout { .unwrap() .destroy_bind_group_layout(raw); } - } else { - panic!("BindGroupLayout raw cannot be destroyed because is still in use"); } } } @@ -582,7 +579,7 @@ pub struct PipelineLayoutDescriptor<'a> { #[derive(Debug)] pub struct PipelineLayout { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) info: ResourceInfo, pub(crate) bind_group_layout_ids: ArrayVec, { hal::MAX_BIND_GROUPS }>, @@ -591,8 +588,7 @@ pub struct PipelineLayout { impl Drop for PipelineLayout { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device @@ -601,8 +597,6 @@ impl Drop for PipelineLayout { .unwrap() .destroy_pipeline_layout(raw); } - } else { - panic!("PipelineLayout raw cannot be destroyed because is still in use"); } } } @@ -795,7 +789,7 @@ pub(crate) fn buffer_binding_type_alignment( #[derive(Debug)] pub struct BindGroup { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) layout_id: Valid, pub(crate) info: ResourceInfo, @@ -810,15 
+804,12 @@ pub struct BindGroup { impl Drop for BindGroup { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device.raw.as_ref().unwrap().destroy_bind_group(raw); } - } else { - panic!("BindGroup cannot be destroyed because is still in use"); - } + } } } diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index a9a004b0d2..46d1c41bbb 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -813,7 +813,7 @@ impl RenderBundle { .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let bb = hal::BufferBinding { - buffer: buffer.as_ref(), + buffer, offset, size, }; @@ -832,7 +832,7 @@ impl RenderBundle { .as_ref() .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; let bb = hal::BufferBinding { - buffer: buffer.as_ref(), + buffer, offset, size, }; diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 05d44d8063..43b031553d 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -928,7 +928,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil = Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: view.raw.as_ref().unwrap().as_ref(), + view: view.raw.as_ref().unwrap(), usage, }, depth_ops: at.depth.hal_ops(), @@ -1039,7 +1039,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { .push(resolve_view.to_render_attachment(hal::TextureUses::COLOR_TARGET)); hal_resolve_target = Some(hal::Attachment { - view: resolve_view.raw.as_ref().unwrap().as_ref(), + view: resolve_view.raw.as_ref().unwrap(), usage: hal::TextureUses::COLOR_TARGET, }); } @@ -1167,7 +1167,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { color_attachments: &[], depth_stencil_attachment: Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: view.raw.as_ref().unwrap().as_ref(), + view: view.raw.as_ref().unwrap(), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops, @@ -1540,7 +1540,7 @@ impl Global { ); let bb = hal::BufferBinding { - buffer: buf_raw.as_ref(), + buffer: buf_raw, offset, size, }; @@ -1591,7 +1591,7 @@ impl Global { ); let bb = hal::BufferBinding { - buffer: buf_raw.as_ref(), + buffer: buf_raw, offset, size, }; diff --git a/wgpu-core/src/device/device.rs b/wgpu-core/src/device/device.rs index 1b35f3a219..333ea713f1 100644 --- a/wgpu-core/src/device/device.rs +++ b/wgpu-core/src/device/device.rs @@ -480,7 +480,7 @@ impl Device { .map_err(DeviceError::from)?; Ok(Buffer { - raw: Some(Arc::new(buffer)), + raw: Some(buffer), device: self.clone(), usage: desc.usage, size: desc.size, @@ -504,7 +504,7 @@ impl Device { Texture { inner: Some(resource::TextureInner::Native { - raw: Some(Arc::new(hal_texture)), + raw: Some(hal_texture), }), device: self.clone(), desc: desc.map_label(|_| ()), @@ -1011,7 +1011,7 @@ impl Device { }; Ok(TextureView { - raw: Some(Arc::new(raw)), + raw: Some(raw), parent_id: id::Valid(texture_id), device: self.clone(), desc: resource::HalTextureViewDescriptor { @@ -1148,7 +1148,7 @@ impl Device { .map_err(DeviceError::from)? 
}; Ok(Sampler { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), info: ResourceInfo::new(desc.label.borrow_or_default()), comparison: desc.compare.is_some(), @@ -1286,7 +1286,7 @@ impl Device { }; Ok(pipeline::ShaderModule { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), interface: Some(interface), info: ResourceInfo::new(desc.label.borrow_or_default()), @@ -1328,7 +1328,7 @@ impl Device { }; Ok(pipeline::ShaderModule { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), interface: None, info: ResourceInfo::new(desc.label.borrow_or_default()), @@ -1577,7 +1577,7 @@ impl Device { .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; Ok(binding_model::BindGroupLayout { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), count_validator, entries: entry_map, @@ -1708,7 +1708,7 @@ impl Device { )); Ok(hal::BufferBinding { - buffer: raw_buffer.as_ref(), + buffer: raw_buffer, offset: bb.offset, size: bb.size, }) @@ -1918,7 +1918,7 @@ impl Device { )?; let res_index = hal_textures.len(); hal_textures.push(hal::TextureBinding { - view: view.raw.as_ref().unwrap().as_ref(), + view: view.raw.as_ref().unwrap(), usage: internal_use, }); (res_index, 1) @@ -1945,7 +1945,7 @@ impl Device { &mut used_texture_ranges, )?; hal_textures.push(hal::TextureBinding { - view: view.raw.as_ref().unwrap().as_ref(), + view: view.raw.as_ref().unwrap(), usage: internal_use, }); } @@ -1971,11 +1971,11 @@ impl Device { } let samplers = hal_samplers .iter() - .map(|&s| s.as_ref().unwrap().as_ref()) + .map(|&s| s.as_ref().unwrap()) .collect::>(); let hal_desc = hal::BindGroupDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap().as_ref(), + layout: layout.raw.as_ref().unwrap(), entries: &hal_entries, buffers: &hal_buffers, samplers: samplers.as_ref(), @@ -1990,7 +1990,7 @@ impl Device { }; Ok(binding_model::BindGroup { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), layout_id: id::Valid(desc.layout), info: ResourceInfo::new(desc.label.borrow_or_default()), @@ -2240,7 +2240,7 @@ impl Device { let bgl_vec = desc .bind_group_layouts .iter() - .map(|&id| bgl_guard.get(id).unwrap().raw.as_ref().unwrap().as_ref()) + .map(|&id| bgl_guard.get(id).unwrap().raw.as_ref().unwrap()) .collect::>(); let hal_desc = hal::PipelineLayoutDescriptor { label: desc.label.borrow_option(), @@ -2258,7 +2258,7 @@ impl Device { }; Ok(binding_model::PipelineLayout { - raw: Some(Arc::new(raw)), + raw: Some(raw), device: self.clone(), info: ResourceInfo::new(desc.label.borrow_or_default()), bind_group_layout_ids: desc @@ -2398,10 +2398,10 @@ impl Device { let pipeline_desc = hal::ComputePipelineDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap().as_ref(), + layout: layout.raw.as_ref().unwrap(), stage: hal::ProgrammableStage { entry_point: desc.stage.entry_point.as_ref(), - module: shader_module.raw.as_ref().unwrap().as_ref(), + module: shader_module.raw.as_ref().unwrap(), }, }; @@ -2424,7 +2424,7 @@ impl Device { })?; let pipeline = pipeline::ComputePipeline { - raw: Some(Arc::new(raw)), + raw: Some(raw), layout_id: id::Valid(pipeline_layout_id), device: self.clone(), late_sized_buffer_groups, @@ -2743,7 +2743,7 @@ impl Device { } hal::ProgrammableStage { - module: shader_module.raw.as_ref().unwrap().as_ref(), + module: shader_module.raw.as_ref().unwrap(), entry_point: stage.entry_point.as_ref(), } }; @@ -2792,7 +2792,7 @@ impl Device { } Some(hal::ProgrammableStage { - module: 
shader_module.raw.as_ref().unwrap().as_ref(), + module: shader_module.raw.as_ref().unwrap(), entry_point: fragment.stage.entry_point.as_ref(), }) } @@ -2874,7 +2874,7 @@ impl Device { let pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap().as_ref(), + layout: layout.raw.as_ref().unwrap(), vertex_buffers: &vertex_buffers, vertex_stage, primitive: desc.primitive, @@ -2939,7 +2939,7 @@ impl Device { } let pipeline = pipeline::RenderPipeline { - raw: Some(Arc::new(raw)), + raw: Some(raw), layout_id: id::Valid(pipeline_layout_id), device: self.clone(), pass_context, @@ -3034,13 +3034,13 @@ impl Device { let hal_desc = desc.map_label(crate::LabelHelpers::borrow_option); Ok(QuerySet { - raw: Some(Arc::new(unsafe { + raw: Some(unsafe { self.raw .as_ref() .unwrap() .create_query_set(&hal_desc) .unwrap() - })), + }), device: self.clone(), info: ResourceInfo::new(""), desc: desc.map_label(|_| ()), diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index cb2a76681c..ea3c3f8ab1 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2469,11 +2469,11 @@ impl Global { size, }); let transition_src = hal::BufferBarrier { - buffer: stage_buffer.raw.as_ref().unwrap().as_ref(), + buffer: stage_buffer.raw.as_ref().unwrap(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }; let transition_dst = hal::BufferBarrier { - buffer: raw_buf.as_ref(), + buffer: raw_buf, usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, }; let mut pending_writes = device.pending_writes.lock(); @@ -2485,7 +2485,7 @@ impl Global { ); if buffer.size > 0 { encoder.copy_buffer_to_buffer( - stage_buffer.raw.as_ref().unwrap().as_ref(), + stage_buffer.raw.as_ref().unwrap(), raw_buf, region.into_iter(), ); diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index f3f1133df0..aeb7216f6f 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -42,7 +42,7 @@ pub struct ShaderModuleDescriptor<'a> { #[derive(Debug)] pub struct ShaderModule { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) interface: Option, pub(crate) info: ResourceInfo, @@ -52,20 +52,17 @@ pub struct ShaderModule { impl Drop for ShaderModule { fn drop(&mut self) { - #[cfg(feature = "trace")] - if let Some(ref trace) = self.device.trace { - trace - .lock() - .add(trace::Action::DestroyShaderModule(self.info.id())); - } - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { + #[cfg(feature = "trace")] + if let Some(ref trace) = self.device.trace { + trace + .lock() + .add(trace::Action::DestroyShaderModule(self.info.id())); + } unsafe { use hal::Device; self.device.raw.as_ref().unwrap().destroy_shader_module(raw); } - } else { - panic!("ShaderModule raw cannot be destroyed because is still in use"); } } } @@ -232,7 +229,7 @@ pub enum CreateComputePipelineError { #[derive(Debug)] pub struct ComputePipeline { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) layout_id: Valid, pub(crate) device: Arc>, pub(crate) late_sized_buffer_groups: ArrayVec, @@ -241,8 +238,7 @@ pub struct ComputePipeline { impl Drop for ComputePipeline { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device @@ -251,8 +247,6 @@ impl Drop for ComputePipeline { .unwrap() 
.destroy_compute_pipeline(raw); } - } else { - panic!("ComputePipeline raw cannot be destroyed because is still in use"); } } } @@ -451,7 +445,7 @@ impl Default for VertexStep { #[derive(Debug)] pub struct RenderPipeline { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) layout_id: Valid, pub(crate) device: Arc>, pub(crate) pass_context: RenderPassContext, @@ -464,8 +458,7 @@ pub struct RenderPipeline { impl Drop for RenderPipeline { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device @@ -474,8 +467,6 @@ impl Drop for RenderPipeline { .unwrap() .destroy_render_pipeline(raw); } - } else { - panic!("RenderPipeline raw cannot be destroyed because is still in use"); } } } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index c7f2006731..442f061cef 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -339,7 +339,7 @@ pub type BufferDescriptor<'a> = wgt::BufferDescriptor>; #[derive(Debug)] pub struct Buffer { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) usage: wgt::BufferUsages, pub(crate) size: wgt::BufferAddress, @@ -351,14 +351,10 @@ pub struct Buffer { impl Drop for Buffer { fn drop(&mut self) { - if let Some(buffer) = self.raw.take() { - if let Ok(raw) = Arc::try_unwrap(buffer) { - unsafe { - use hal::Device; - self.device.raw.as_ref().unwrap().destroy_buffer(raw); - } - } else { - panic!("Buffer cannot be destroyed because is still in use"); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_buffer(raw); } } } @@ -434,7 +430,7 @@ pub type TextureDescriptor<'a> = wgt::TextureDescriptor, Vec { Native { - raw: Option>, + raw: Option, }, Surface { raw: A::SurfaceTexture, @@ -488,13 +484,8 @@ impl Drop for Texture { } if let Some(inner) = self.inner.take() { if let TextureInner::Native { raw: Some(raw) } = inner { - let raw = Arc::try_unwrap(raw); unsafe { - self.device - .raw - .as_ref() - .unwrap() - .destroy_texture(raw.unwrap()); + self.device.raw.as_ref().unwrap().destroy_texture(raw); } } } @@ -747,7 +738,7 @@ pub enum TextureViewNotRenderableReason { #[derive(Debug)] pub struct TextureView { - pub(crate) raw: Option>, + pub(crate) raw: Option, // The parent's refcount is held alive, but the parent may still be deleted // if it's a surface texture. TODO: make this cleaner. 
pub(crate) parent_id: Valid, @@ -764,14 +755,11 @@ pub struct TextureView { impl Drop for TextureView { fn drop(&mut self) { - use hal::Device; - let raw = Arc::try_unwrap(self.raw.take().unwrap()); - unsafe { - self.device - .raw - .as_ref() - .unwrap() - .destroy_texture_view(raw.unwrap()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_texture_view(raw); + } } } } @@ -865,7 +853,7 @@ pub struct SamplerDescriptor<'a> { #[derive(Debug)] pub struct Sampler { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) info: ResourceInfo, /// `true` if this is a comparison sampler @@ -876,14 +864,11 @@ pub struct Sampler { impl Drop for Sampler { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device.raw.as_ref().unwrap().destroy_sampler(raw); } - } else { - panic!("Sampler raw cannot be destroyed because is still in use"); } } } @@ -955,7 +940,7 @@ pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor>; #[derive(Debug)] pub struct QuerySet { - pub(crate) raw: Option>, + pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) info: ResourceInfo, pub(crate) desc: wgt::QuerySetDescriptor<()>, @@ -963,14 +948,11 @@ pub struct QuerySet { impl Drop for QuerySet { fn drop(&mut self) { - let raw = self.raw.take().unwrap(); - if let Ok(raw) = Arc::try_unwrap(raw) { + if let Some(raw) = self.raw.take() { unsafe { use hal::Device; self.device.raw.as_ref().unwrap().destroy_query_set(raw); } - } else { - panic!("QuerySet raw cannot be destroyed because is still in use"); } } } From 44413ca5a4cc69fd1ea90e59498e50f0dcfab109 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 1 Apr 2023 18:08:54 +0200 Subject: [PATCH 003/132] Fixing clippy errors --- deno_webgpu/lib.rs | 7 +- deno_webgpu/texture.rs | 2 +- player/src/lib.rs | 6 +- wgpu-core/src/binding_model.rs | 2 +- wgpu-core/src/command/clear.rs | 6 +- wgpu-core/src/command/compute.rs | 25 +- wgpu-core/src/command/memory_init.rs | 2 +- wgpu-core/src/command/mod.rs | 43 +-- wgpu-core/src/command/query.rs | 18 +- wgpu-core/src/command/render.rs | 21 +- wgpu-core/src/command/transfer.rs | 78 +++--- wgpu-core/src/device/global.rs | 260 +++++++++--------- wgpu-core/src/device/life.rs | 143 ++++------ wgpu-core/src/device/mod.rs | 4 +- wgpu-core/src/device/queue.rs | 35 ++- .../src/device/{device.rs => resource.rs} | 22 +- wgpu-core/src/device/trace.rs | 6 +- wgpu-core/src/global.rs | 12 +- wgpu-core/src/instance.rs | 3 +- wgpu-core/src/pipeline.rs | 8 +- wgpu-core/src/present.rs | 25 +- wgpu-core/src/registry.rs | 7 +- wgpu-core/src/resource.rs | 27 +- wgpu-core/src/track/buffer.rs | 10 +- wgpu-core/src/track/metadata.rs | 2 +- wgpu-core/src/track/stateless.rs | 8 +- wgpu-core/src/track/texture.rs | 44 +-- wgpu-hal/examples/halmark/main.rs | 6 +- wgpu-hal/examples/raw-gles.rs | 2 +- wgpu/src/backend/direct.rs | 7 +- 30 files changed, 398 insertions(+), 443 deletions(-) rename wgpu-core/src/device/{device.rs => resource.rs} (99%) diff --git a/deno_webgpu/lib.rs b/deno_webgpu/lib.rs index 46b9d8d14f..1c9beb1589 100644 --- a/deno_webgpu/lib.rs +++ b/deno_webgpu/lib.rs @@ -94,7 +94,8 @@ fn check_unstable(state: &OpState, api_name: &str) { } } -pub type Instance = std::sync::Arc>; +pub type Instance = + std::sync::Arc>; struct WebGpuAdapter(Instance, wgpu_core::id::AdapterId); impl Resource for WebGpuAdapter { @@ -318,9 +319,9 @@ pub 
async fn op_webgpu_request_adapter( let instance = if let Some(instance) = state.try_borrow::() { instance } else { - state.put(std::sync::Arc::new(wgpu_core::hub::Global::new( + state.put(std::sync::Arc::new(wgpu_core::global::Global::new( "webgpu", - wgpu_core::hub::IdentityManagerFactory, + wgpu_core::identity::IdentityManagerFactory, wgpu_types::InstanceDescriptor { backends, dx12_shader_compiler: wgpu_types::Dx12Compiler::Fxc, diff --git a/deno_webgpu/texture.rs b/deno_webgpu/texture.rs index 92c8457071..5dfdaa848d 100644 --- a/deno_webgpu/texture.rs +++ b/deno_webgpu/texture.rs @@ -83,7 +83,7 @@ pub fn op_webgpu_create_texture( let (val, maybe_err) = gfx_select!(device => instance.device_create_texture( device, &descriptor, - () + (), Some(()) )); let rid = state.resource_table.add(WebGpuTexture { diff --git a/player/src/lib.rs b/player/src/lib.rs index 8702b982ec..dfd8dd7fe5 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -36,7 +36,7 @@ impl wgc::identity::IdentityHandlerFactory< IdentityPassThrough(PhantomData) } } -impl wgc::global::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} +impl wgc::identity::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} pub trait GlobalPlay { fn encode_commands( @@ -180,9 +180,9 @@ impl GlobalPlay for wgc::global::Global { Action::DestroyBuffer(id) => { self.buffer_drop::(id, true); } - Action::CreateTexture(id, desc) => { + Action::CreateTexture(id, tv_id, desc) => { self.device_maintain_ids::(device).unwrap(); - let (_, error) = self.device_create_texture::(device, &desc, id); + let (_, error) = self.device_create_texture::(device, &desc, id, tv_id); if let Some(e) = error { panic!("{:?}", e); } diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 8f6717be73..9720e235e4 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -809,7 +809,7 @@ impl Drop for BindGroup { use hal::Device; self.device.raw.as_ref().unwrap().destroy_bind_group(raw); } - } + } } } diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 1ba02ef255..d0ca562a60 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -79,8 +79,7 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id) + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -159,8 +158,7 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id) + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 1370a9dcbb..ed95eab7b8 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -337,20 +337,9 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - // Spell out the type, to placate rust-analyzer. 
- // https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &CommandBuffer = - CommandBuffer::get_encoder(&*cmd_buf_guard, encoder_id).map_pass_err(init_scope)?; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - // will be reset to true if recording is done without errors - let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); - - *status = CommandEncoderStatus::Error; - let raw = encoder.open(); - let device = &cmd_buf.device; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -359,6 +348,14 @@ impl Global { }); } + // will be reset to true if recording is done without errors + let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + + *status = CommandEncoderStatus::Error; + let raw = encoder.open(); + let device = &cmd_buf.device; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let bind_group_guard = hub.bind_groups.read(); let pipeline_guard = hub.compute_pipelines.read(); @@ -591,7 +588,7 @@ impl Global { raw, &texture_guard, &mut tracker.textures, - &device, + device, ); state.is_ready().map_pass_err(scope)?; @@ -780,7 +777,7 @@ impl Global { raw, &texture_guard, &mut tracker.textures, - &device, + device, ); Ok(()) diff --git a/wgpu-core/src/command/memory_init.rs b/wgpu-core/src/command/memory_init.rs index 75ffcfcbdd..88d2da14b4 100644 --- a/wgpu-core/src/command/memory_init.rs +++ b/wgpu-core/src/command/memory_init.rs @@ -233,7 +233,7 @@ impl BakedCommands { unsafe { self.encoder.transition_buffers( transition - .map(|pending| pending.into_hal(&buffer)) + .map(|pending| pending.into_hal(buffer)) .into_iter(), ); } diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index e6f9364913..1571b7938c 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -20,7 +20,9 @@ use self::memory_init::CommandBufferTextureMemoryActions; use crate::device::Device; use crate::error::{ErrorFormatter, PrettyError}; +use crate::hub::Hub; use crate::id::CommandBufferId; + use crate::init_tracker::BufferInitTrackerAction; use crate::resource::{Resource, ResourceInfo}; use crate::track::{Tracker, UsageScope}; @@ -153,7 +155,7 @@ impl Drop for CommandBuffer { if self.data.lock().is_none() { return; } - let mut baked = self.into_baked(); + let mut baked = self.extract_baked_commands(); unsafe { baked.encoder.reset_all(baked.list.into_iter()); } @@ -249,11 +251,11 @@ impl CommandBuffer { let buffer_barriers = base.buffers.drain().map(|pending| { let buf = unsafe { buffer_guard.get_unchecked(pending.id) }; - pending.into_hal(&buf) + pending.into_hal(buf) }); let texture_barriers = base.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(&tex) + pending.into_hal(tex) }); unsafe { @@ -264,13 +266,17 @@ impl CommandBuffer { } impl CommandBuffer { - fn get_encoder( - storage: &Storage, + fn get_encoder( + hub: &Hub, id: id::CommandEncoderId, - ) -> Result<&Self, CommandEncoderError> { + ) -> Result, CommandEncoderError> + where + G: GlobalIdentityHandlerFactory, + { + let storage = hub.command_buffers.read(); match storage.get(id) { Ok(cmd_buf) => match cmd_buf.data.lock().as_ref().unwrap().status { - CommandEncoderStatus::Recording => Ok(&cmd_buf), + CommandEncoderStatus::Recording => Ok(cmd_buf.clone()), 
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), }, @@ -285,7 +291,7 @@ impl CommandBuffer { } } - pub(crate) fn into_baked(&mut self) -> BakedCommands { + pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { let data = self.data.lock().take().unwrap(); BakedCommands { encoder: data.encoder.raw, @@ -298,7 +304,7 @@ impl CommandBuffer { pub(crate) fn from_arc_into_baked(self: Arc) -> BakedCommands { if let Ok(mut command_buffer) = Arc::try_unwrap(self) { - command_buffer.into_baked() + command_buffer.extract_baked_commands() } else { panic!("CommandBuffer cannot be destroyed because is still in use"); } @@ -313,11 +319,11 @@ impl Resource for CommandBuffer { } fn label(&self) -> String { - if let Some(label) = &self.data.lock().as_ref().unwrap().encoder.label { - label.clone() - } else { - String::new() - } + let str = match self.data.lock().as_ref().unwrap().encoder.label.as_ref() { + Some(label) => label.clone(), + _ => String::new(), + }; + str } } @@ -462,8 +468,7 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] @@ -487,8 +492,7 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -512,8 +516,7 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index 264b17f12e..7d05365353 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -302,13 +302,9 @@ impl Global { ) -> Result<(), QueryError> { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - - let cmd_buf = CommandBuffer::get_encoder(&cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, _, tracker, _, _) = cmd_buf_data.raw_mut(); - let raw_encoder = encoder.open(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -318,6 +314,9 @@ impl Global { }); } + let (encoder, _, tracker, _, _) = cmd_buf_data.raw_mut(); + let raw_encoder = encoder.open(); + let query_set_guard = hub.query_sets.read(); let query_set = tracker .query_sets @@ -340,13 +339,9 @@ impl Global { ) -> Result<(), QueryError> { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - - let cmd_buf = CommandBuffer::get_encoder(&cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, _, tracker, buffer_memory_init_actions, _) = cmd_buf_data.raw_mut(); - 
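The `get_encoder` change above is the heart of the new locking discipline: instead of returning a `&CommandBuffer` tied to the `command_buffers` read guard, it clones the `Arc` out of storage and lets the guard drop immediately. A minimal std-only sketch of that pattern, using made-up `Registry`/`Resource` types rather than wgpu-core's own storage wrappers:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

struct Resource {
    label: String,
}

struct Registry {
    storage: RwLock<HashMap<u64, Arc<Resource>>>,
}

impl Registry {
    /// Clone the Arc out of the read guard so the lock is released before the
    /// caller starts doing real work with the resource.
    fn get(&self, id: u64) -> Option<Arc<Resource>> {
        let guard = self.storage.read().unwrap();
        guard.get(&id).cloned()
        // `guard` drops here; the returned Arc keeps the resource alive.
    }
}

fn main() {
    let registry = Registry {
        storage: RwLock::new(HashMap::from([(
            1_u64,
            Arc::new(Resource { label: "encoder".to_string() }),
        )])),
    };
    let res = registry.get(1).expect("known id");
    // Other threads can now take the write lock while we still hold `res`.
    println!("{}", res.label);
}
```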
let raw_encoder = encoder.open(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -359,6 +354,9 @@ impl Global { }); } + let (encoder, _, tracker, buffer_memory_init_actions, _) = cmd_buf_data.raw_mut(); + let raw_encoder = encoder.open(); + if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 43b031553d..ca6787881f 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1216,20 +1216,9 @@ impl Global { let hub = A::hub(self); let (scope, query_reset_state, pending_discard_init_fixups) = { - let cmb_guard = hub.command_buffers.read(); - - // Spell out the type, to placate rust-analyzer. - // https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &CommandBuffer = - CommandBuffer::get_encoder(&*cmb_guard, encoder_id).map_pass_err(init_scope)?; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); - // close everything while the new command encoder is filled - encoder.close(); - // will be reset to true if recording is done without errors - *status = CommandEncoderStatus::Error; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -1240,6 +1229,12 @@ impl Global { }); } + let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + // close everything while the new command encoder is filled + encoder.close(); + // will be reset to true if recording is done without errors + *status = CommandEncoderStatus::Error; let device = &cmd_buf.device; encoder.open_pass(base.label); @@ -1258,7 +1253,7 @@ impl Global { ); let mut info = RenderPassInfo::start( - &device, + device, base.label, color_attachments, depth_stencil_attachment, diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 9e584d3510..72189e9f1e 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -570,8 +570,7 @@ impl Global { } let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); let buffer_guard = hub.buffers.read(); @@ -714,16 +713,9 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); - let buffer_guard = hub.buffers.read(); - let texture_guard = hub.textures.read(); - - let device = &cmd_buf.device; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -733,6 +725,12 @@ impl Global { size: *copy_size, }); } + let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + let 
buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + + let device = &cmd_buf.device; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_buffer_to_texture of size 0"); @@ -750,7 +748,7 @@ impl Global { copy_size, )?; - let (dst_range, dst_base) = extract_texture_selector(destination, copy_size, &dst_texture)?; + let (dst_range, dst_base) = extract_texture_selector(destination, copy_size, dst_texture)?; // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required @@ -759,7 +757,7 @@ impl Global { encoder, tracker, texture_memory_actions, - &device, + device, destination, copy_size, &texture_guard, @@ -781,7 +779,7 @@ impl Global { let dst_pending = tracker .textures .set_single( - &dst_texture, + dst_texture, destination.texture, dst_range, hal::TextureUses::COPY_DST, @@ -798,7 +796,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_texture)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture)); if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -866,17 +864,9 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); - - let buffer_guard = hub.buffers.read(); - let texture_guard = hub.textures.read(); - - let device = &cmd_buf.device; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -886,6 +876,13 @@ impl Global { size: *copy_size, }); } + let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = + cmd_buf_data.raw_mut(); + + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + + let device = &cmd_buf.device; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); @@ -899,7 +896,7 @@ impl Global { let (hal_copy_size, array_layer_count) = validate_texture_copy_range(source, &src_texture.desc, CopySide::Source, copy_size)?; - let (src_range, src_base) = extract_texture_selector(source, copy_size, &src_texture)?; + let (src_range, src_base) = extract_texture_selector(source, copy_size, src_texture)?; // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required @@ -908,7 +905,7 @@ impl Global { encoder, tracker, texture_memory_actions, - &device, + device, source, copy_size, &texture_guard, @@ -917,7 +914,7 @@ impl Global { let src_pending = tracker .textures .set_single( - &src_texture, + src_texture, source.texture, src_range, hal::TextureUses::COPY_SRC, @@ -945,7 +942,7 @@ impl Global { } .into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(&src_texture)); + let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture)); let (dst_buffer, dst_pending) = tracker .buffers @@ -1036,14 +1033,9 @@ impl Global { let hub = A::hub(self); - let cmd_buf_guard = 
hub.command_buffers.read(); - let cmd_buf = CommandBuffer::get_encoder(&*cmd_buf_guard, command_encoder_id)?; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, _, tracker, _, texture_memory_actions) = cmd_buf_data.raw_mut(); - let texture_guard = hub.textures.read(); - - let device = &cmd_buf.device; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -1053,6 +1045,10 @@ impl Global { size: *copy_size, }); } + let (encoder, _, tracker, _, texture_memory_actions) = cmd_buf_data.raw_mut(); + let texture_guard = hub.textures.read(); + + let device = &cmd_buf.device; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); @@ -1087,9 +1083,9 @@ impl Global { copy_size, )?; - let (src_range, src_tex_base) = extract_texture_selector(source, copy_size, &src_texture)?; + let (src_range, src_tex_base) = extract_texture_selector(source, copy_size, src_texture)?; let (dst_range, dst_tex_base) = - extract_texture_selector(destination, copy_size, &dst_texture)?; + extract_texture_selector(destination, copy_size, dst_texture)?; let src_texture_aspects = hal::FormatAspects::from(src_texture.desc.format); let dst_texture_aspects = hal::FormatAspects::from(dst_texture.desc.format); if src_tex_base.aspect != src_texture_aspects { @@ -1106,7 +1102,7 @@ impl Global { encoder, tracker, texture_memory_actions, - &device, + device, source, copy_size, &texture_guard, @@ -1115,7 +1111,7 @@ impl Global { encoder, tracker, texture_memory_actions, - &device, + device, destination, copy_size, &texture_guard, @@ -1125,7 +1121,7 @@ impl Global { .trackers .textures .set_single( - &src_texture, + src_texture, source.texture, src_range, hal::TextureUses::COPY_SRC, @@ -1144,14 +1140,14 @@ impl Global { //TODO: try to avoid this the collection. It's needed because both // `src_pending` and `dst_pending` try to hold `trackers.textures` mutably. 
let mut barriers: ArrayVec<_, 2> = src_pending - .map(|pending| pending.into_hal(&src_texture)) + .map(|pending| pending.into_hal(src_texture)) .collect(); let dst_pending = cmd_buf_data .trackers .textures .set_single( - &dst_texture, + dst_texture, destination.texture, dst_range, hal::TextureUses::COPY_DST, @@ -1169,7 +1165,7 @@ impl Global { ); } - barriers.extend(dst_pending.map(|pending| pending.into_hal(&dst_texture))); + barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture))); let hal_copy_size = hal::CopyExtent { width: src_copy_size.width.min(dst_copy_size.width), diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index ea3c3f8ab1..650ebfd2fc 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1,3 +1,5 @@ +#[cfg(any(feature = "trace", feature = "replay"))] +use crate::device::trace; use crate::{ binding_model, command, conv, device::{life::WaitIdleError, map_buffer, queue, Device, DeviceError, HostMap}, @@ -9,7 +11,7 @@ use crate::{ instance::{self, Adapter, Surface}, pipeline, present, resource::{self, Buffer, BufferAccessResult, BufferMapState, Resource}, - resource::{BufferAccessError, BufferMapOperation}, + resource::{BufferAccessError, BufferMapOperation, TextureClearMode}, validation::check_buffer_usage, FastHashMap, Label, LabelHelpers as _, }; @@ -46,7 +48,7 @@ impl Global { let surface = surface_guard .get(surface_id) .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; - Ok(adapter.is_surface_supported(&surface)) + Ok(adapter.is_surface_supported(surface)) } pub fn surface_get_capabilities( @@ -89,7 +91,7 @@ impl Global { .get(surface_id) .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; - get_supported_callback(&adapter, &surface) + get_supported_callback(adapter, surface) } pub fn device_features( @@ -139,18 +141,16 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { + if let Some(ref mut trace) = *device.trace.lock() { let mut desc = desc.clone(); let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { desc.usage |= wgt::BufferUsages::COPY_DST; } - trace - .lock() - .add(trace::Action::CreateBuffer(fid.id(), desc)); + trace.add(trace::Action::CreateBuffer(fid.id(), desc)); } - let mut buffer = match device.create_buffer(device_id, desc, false) { + let buffer = match device.create_buffer(device_id, desc, false) { Ok(buffer) => buffer, Err(e) => break e, }; @@ -165,7 +165,7 @@ impl Global { } else { match map_buffer( device.raw.as_ref().unwrap(), - &mut buffer, + &buffer, 0, map_size, HostMap::Write, @@ -350,8 +350,7 @@ impl Global { //assert!(buffer isn't used by the GPU); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data_path = trace.make_binary("bin", data); trace.add(trace::Action::WriteBuffer { id: buffer_id, @@ -467,15 +466,15 @@ impl Global { | &BufferMapState::Init { .. } | &BufferMapState::Active { .. 
} => { - self.buffer_unmap_inner(buffer_id, &buffer, &device) + self.buffer_unmap_inner(buffer_id, &buffer, device) .unwrap_or(None) } _ => None, }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeBuffer(buffer_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::FreeBuffer(buffer_id)); } if buffer.raw.is_none() { return Err(resource::DestroyError::AlreadyDestroyed); @@ -552,13 +551,16 @@ impl Global { device_id: DeviceId, desc: &resource::TextureDescriptor, id_in: Input, - idtv_in: Input, + idtv_in: Option>, ) -> (id::TextureId, Option) { profiling::scope!("Device::create_texture"); let hub = A::hub(self); let fid = hub.textures.prepare(id_in); + let mut fid_tv = idtv_in + .as_ref() + .map(|id| hub.texture_views.prepare(id.clone())); let error = loop { let device = match hub.devices.get(device_id) { @@ -566,10 +568,12 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTexture( + fid.id(), + fid_tv.as_ref().map(|id| id.id()), + desc.clone(), + )); } let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); @@ -580,51 +584,54 @@ impl Global { let (id, resource) = fid.assign(texture); log::info!("Created texture {:?} with {:?}", id, desc); - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; - - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let fid = hub.texture_views.prepare(idtv_in.clone()); - - let descriptor = resource::TextureViewDescriptor { - label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), - format: Some(desc.format), - dimension: Some(dimension), - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - array_layer_count: Some(1), - }, - }; - - let texture_view = device - .create_texture_view(&resource, id.0, &descriptor) - .unwrap(); - let (tv_id, texture_view) = fid.assign(texture_view); - log::info!("Created texture view {:?} for texture {:?}", tv_id, id); + if idtv_in.is_some() { + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; - let mut texture_clear_mode = resource.clear_mode.write(); - match &mut *texture_clear_mode { - resource::TextureClearMode::RenderPass { - clear_views, + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let descriptor = resource::TextureViewDescriptor { + label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), + format: Some(desc.format), + dimension: Some(dimension), + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + + let texture_view = device + .create_texture_view(&resource, id.0, &descriptor) + .unwrap(); + let fid_tv = if fid_tv.is_some() { + fid_tv.take().unwrap() + } else { + 
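A pattern worth calling out in the hunks above: every `if let Some(ref trace) = device.trace { trace.lock()... }` call site becomes `if let Some(ref mut trace) = *device.trace.lock()`, so the trace now sits behind the lock as an `Option`. A small sketch of that shape with std's `Mutex` (wgpu-core uses its own lock wrappers, and the `Trace` fields here are invented):

```rust
use std::sync::Mutex;

struct Trace {
    actions: Vec<String>, // stand-in for the real trace actions
}

struct Device {
    // Interior mutability: recording works through a shared `&Device`.
    trace: Mutex<Option<Trace>>,
}

impl Device {
    fn record(&self, action: &str) {
        // Lock first, then check whether tracing is enabled at all.
        if let Some(ref mut trace) = *self.trace.lock().unwrap() {
            trace.actions.push(action.to_owned());
        }
    }

    fn stop_tracing(&self) {
        // The whole trace can be dropped later without `&mut Device`.
        *self.trace.lock().unwrap() = None;
    }
}

fn main() {
    let device = Device {
        trace: Mutex::new(Some(Trace { actions: Vec::new() })),
    };
    device.record("CreateBuffer");
    device.stop_tracing();
    device.record("ignored"); // no-op once the trace is gone
}
```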
hub.texture_views.prepare(idtv_in.clone().unwrap()) + }; + let (tv_id, texture_view) = fid_tv.assign(texture_view); + log::info!("Created texture view {:?} for texture {:?}", tv_id, id); + + let texture_clear_mode = &mut *resource.clear_mode.write(); + if let &mut TextureClearMode::RenderPass { + ref mut clear_views, is_color: _, - } => { + } = texture_clear_mode + { clear_views.push(texture_view.clone()); } - _ => {} - } - device - .trackers - .lock() - .views - .insert_single(tv_id, texture_view); + device + .trackers + .lock() + .views + .insert_single(tv_id, texture_view); + } } } @@ -668,10 +675,8 @@ impl Global { // NB: Any change done through the raw texture handle will not be // recorded in the replay #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTexture(fid.id(), None, desc.clone())); } let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); @@ -736,8 +741,8 @@ impl Global { let device = &texture.device; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeTexture(texture_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::FreeTexture(texture_id)); } let last_submit_index = texture.info.submission_index(); @@ -751,20 +756,21 @@ impl Global { resource::TextureClearMode::None => SmallVec::new(), }; - match texture.inner.as_ref().unwrap() { + match *texture.inner.as_ref().unwrap() { resource::TextureInner::Native { ref raw } => { - if raw.is_none() { - return Err(resource::DestroyError::AlreadyDestroyed); - } - let temp = queue::TempResource::Texture(texture.clone(), clear_views); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - if pending_writes.dst_textures.contains_key(&texture_id) { - pending_writes.temp_resources.push(temp); + if !raw.is_none() { + let temp = queue::TempResource::Texture(texture.clone(), clear_views); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_textures.contains_key(&texture_id) { + pending_writes.temp_resources.push(temp); + } else { + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); + } } else { - device - .lock_life() - .schedule_resource_destruction(temp, last_submit_index); + return Err(resource::DestroyError::AlreadyDestroyed); } } resource::TextureInner::Surface { .. 
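The clear-view loop above has one pre-prepared view id (`fid_tv`) but may need a view per mip level and array layer, so it consumes the prepared id on the first pass and mints fresh ones afterwards; `Option::take` is what makes the "use it once" half work. A toy illustration of the idiom (all the numbers are made up):

```rust
fn main() {
    // Pretend the caller prepared one id up front for the first clear view.
    let mut preassigned: Option<u32> = Some(100);
    let mut next_fresh = 200_u32;

    let mut ids = Vec::new();
    for _mip_level in 0..2 {
        for _array_layer in 0..3 {
            // First iteration: `take` moves the prepared id out, leaving None.
            // Every later iteration falls through to a freshly minted id.
            let id = match preassigned.take() {
                Some(id) => id,
                None => {
                    let id = next_fresh;
                    next_fresh += 1;
                    id
                }
            };
            ids.push(id);
        }
    }
    assert_eq!(ids, vec![100, 200, 201, 202, 203, 204]);
}
```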
} => { @@ -840,8 +846,8 @@ impl Global { }; let device = &texture.device; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateTextureView { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTextureView { id: fid.id(), parent_id: texture_id, desc: desc.clone(), @@ -927,10 +933,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateSampler(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateSampler(fid.id(), desc.clone())); } let sampler = match device.create_sampler(desc) { @@ -999,10 +1003,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); } let mut entry_map = FastHashMap::default(); @@ -1096,10 +1098,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); } let layout = { @@ -1164,10 +1164,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } let bind_group_layout = match hub.bind_group_layouts.get(desc.layout) { @@ -1244,8 +1242,7 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data = match source { #[cfg(feature = "wgsl")] pipeline::ShaderModuleSource::Wgsl(ref code) => { @@ -1307,8 +1304,7 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data = trace.make_binary("spv", unsafe { std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4) }); @@ -1373,7 +1369,7 @@ impl Global { encoder, &device, #[cfg(feature = "trace")] - device.trace.is_some(), + device.trace.lock().is_some(), &desc.label, ); @@ -1441,8 +1437,8 @@ impl Global { Err(_) => break command::RenderBundleError::INVALID_DEVICE, }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderBundle { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateRenderBundle { id: fid.id(), desc: trace::new_render_bundle_encoder_descriptor( desc.label.clone(), @@ -1517,8 +1513,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateQuerySet { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateQuerySet 
{ id: fid.id(), desc: desc.clone(), }); @@ -1559,10 +1555,8 @@ impl Global { let device = &query_set.device; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyQuerySet(query_set_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::DestroyQuerySet(query_set_id)); } device @@ -1600,8 +1594,8 @@ impl Global { }; let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderPipeline { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateRenderPipeline { id: fid.id(), desc: desc.clone(), implicit_context: implicit_context.clone(), @@ -1720,8 +1714,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateComputePipeline { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateComputePipeline { id: fid.id(), desc: desc.clone(), implicit_context: implicit_context.clone(), @@ -1949,10 +1943,8 @@ impl Global { Err(_) => break DeviceError::Invalid.into(), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::ConfigureSurface(surface_id, config.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::ConfigureSurface(surface_id, config.clone())); } let surface = match surface_guard.get(surface_id) { @@ -1961,7 +1953,7 @@ impl Global { }; let caps = unsafe { - let suf = A::get_surface(&surface); + let suf = A::get_surface(surface); let adapter = &adapter_guard[device.adapter_id]; match adapter .raw @@ -2019,7 +2011,7 @@ impl Global { } match unsafe { - A::get_surface(&surface) + A::get_surface(surface) .unwrap() .raw .configure(device.raw.as_ref().unwrap(), &hal_config) @@ -2108,12 +2100,12 @@ impl Global { /// /// Return `all_queue_empty` indicating whether there are more queue /// submissions still in flight. - fn poll_devices( + fn poll_device( &self, force_wait: bool, closures: &mut UserClosures, ) -> Result { - profiling::scope!("poll_devices"); + profiling::scope!("poll_device"); let hub = A::hub(self); let mut devices_to_drop = vec![]; @@ -2158,28 +2150,28 @@ impl Global { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] { - all_queue_empty = self.poll_devices::(force_wait, &mut closures)? - && all_queue_empty; + all_queue_empty = + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "dx12", windows))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "dx11", windows))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(feature = "gles")] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? 
&& all_queue_empty; } closures.fire(); @@ -2382,8 +2374,8 @@ impl Global { return Err(BufferAccessError::UnalignedRangeSize { range_size }); } let map_state = &*buffer.map_state.lock(); - match map_state { - resource::BufferMapState::Init { ptr, .. } => { + match *map_state { + resource::BufferMapState::Init { ref ptr, .. } => { // offset (u64) can not be < 0, so no need to validate the lower bound if offset + range_size > buffer.size { return Err(BufferAccessError::OutOfBoundsOverrun { @@ -2393,7 +2385,9 @@ impl Global { } unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) } } - resource::BufferMapState::Active { ptr, ref range, .. } => { + resource::BufferMapState::Active { + ref ptr, ref range, .. + } => { if offset < range.start { return Err(BufferAccessError::OutOfBoundsUnderrun { index: offset, @@ -2434,8 +2428,7 @@ impl Global { needs_flush, } => { #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data = trace.make_binary("bin", unsafe { std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize) }); @@ -2503,8 +2496,7 @@ impl Global { resource::BufferMapState::Active { ptr, range, host } => { if host == HostMap::Write { #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let size = range.end - range.start; let data = trace.make_binary("bin", unsafe { std::slice::from_raw_parts(ptr.as_ptr(), size as usize) @@ -2545,7 +2537,7 @@ impl Global { .map_err(|_| BufferAccessError::Invalid)?; let device = &buffer.device; - closure = self.buffer_unmap_inner(buffer_id, &buffer, &device) + closure = self.buffer_unmap_inner(buffer_id, &buffer, device) } // Note: outside the scope where locks are held when calling the callback diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 79f8344ee7..f2fb9800f4 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -87,72 +87,25 @@ impl SuspectedResources { } pub(crate) fn add_render_bundle_scope(&mut self, trackers: &RenderBundleScope) { - self.buffers.extend( - trackers - .buffers - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.textures.extend( - trackers - .textures - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.bind_groups.extend( - trackers - .bind_groups - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.render_pipelines.extend( - trackers - .render_pipelines - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.query_sets.extend( - trackers - .query_sets - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); + self.buffers + .extend(trackers.buffers.used_resources().cloned()); + self.textures + .extend(trackers.textures.used_resources().cloned()); + self.bind_groups + .extend(trackers.bind_groups.used_resources().cloned()); + self.render_pipelines + .extend(trackers.render_pipelines.used_resources().cloned()); + self.query_sets + .extend(trackers.query_sets.used_resources().cloned()); } pub(crate) fn add_bind_group_states(&mut self, trackers: &BindGroupStates) { - self.buffers.extend( - trackers - .buffers - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.textures.extend( - trackers - .textures - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - self.texture_views.extend( - trackers - .views - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); - 
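One detail of the renamed `poll_device` loop above is easy to miss: each backend's call sits on the left of the `&&`, so short-circuit evaluation can never skip polling a backend just because an earlier one still had work in flight. A tiny stand-alone illustration with stand-in poll functions (nothing here is wgpu API):

```rust
fn poll_backend(name: &str, queue_empty: bool, log: &mut Vec<String>) -> Result<bool, ()> {
    log.push(format!("polled {name}"));
    Ok(queue_empty)
}

fn main() -> Result<(), ()> {
    let mut log = Vec::new();
    let mut all_queue_empty = true;

    // Call first, then AND: every backend is polled even after a `false`.
    all_queue_empty = poll_backend("vulkan", false, &mut log)? && all_queue_empty;
    all_queue_empty = poll_backend("metal", true, &mut log)? && all_queue_empty;

    // The reversed form would stop polling once the flag is already false:
    // all_queue_empty = all_queue_empty && poll_backend("dx12", true, &mut log)?;

    assert!(!all_queue_empty);
    assert_eq!(log.len(), 2); // both backends really ran
    Ok(())
}
```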
self.samplers.extend( - trackers - .samplers - .used_resources() - .map(|r| r.clone()) - .into_iter(), - ); + self.buffers + .extend(trackers.buffers.used_resources().cloned()); + self.textures + .extend(trackers.textures.used_resources().cloned()); + self.texture_views.extend(trackers.views.used_resources()); + self.samplers.extend(trackers.samplers.used_resources()); } } @@ -410,12 +363,10 @@ impl LifetimeTracker { pub fn post_submit(&mut self) { self.suspected_resources .buffers - .extend(self.future_suspected_buffers.drain(..).map(|stored| stored)); - self.suspected_resources.textures.extend( - self.future_suspected_textures - .drain(..) - .map(|stored| stored), - ); + .append(&mut self.future_suspected_buffers); + self.suspected_resources + .textures + .append(&mut self.future_suspected_textures); } pub(crate) fn map(&mut self, value: &Arc>) { @@ -560,7 +511,7 @@ impl LifetimeTracker { &mut self, hub: &Hub, trackers: &Mutex>, - #[cfg(feature = "trace")] trace: Option<&Mutex>, + #[cfg(feature = "trace")] mut trace: Option<&mut trace::Trace>, ) { profiling::scope!("triage_suspected"); @@ -573,8 +524,8 @@ impl LifetimeTracker { if trackers.bundles.remove_abandoned(id) { log::debug!("Bundle {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyRenderBundle(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderBundle(id.0)); } if let Some(res) = hub @@ -596,8 +547,8 @@ impl LifetimeTracker { if trackers.bind_groups.remove_abandoned(id) { log::debug!("Bind group {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBindGroup(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroup(id.0)); } if let Some(res) = hub @@ -633,8 +584,8 @@ impl LifetimeTracker { if trackers.views.remove_abandoned(id) { log::debug!("Texture view {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTextureView(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyTextureView(id.0)); } if let Some(res) = hub @@ -665,8 +616,8 @@ impl LifetimeTracker { if trackers.textures.remove_abandoned(id) { log::debug!("Texture {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTexture(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyTexture(id.0)); } if let Some(res) = hub.textures.unregister_locked(id.0, &mut *textures_locked) { @@ -677,8 +628,9 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); - if let resource::TextureClearMode::RenderPass { clear_views, .. } = - &*res.clear_mode.read() + if let &resource::TextureClearMode::RenderPass { + ref clear_views, .. 
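Two of the life-tracker cleanups above are pure equivalences that read better once spelled out: `iter().cloned()` replaces the hand-written `.map(|r| r.clone()).into_iter()`, and `Vec::append` replaces `extend(other.drain(..))`, moving the whole source vector and leaving it empty. A small sketch (the `Arc<String>` stand-ins are not the real tracked resource types):

```rust
use std::sync::Arc;

fn main() {
    let tracked: Vec<Arc<String>> = vec![
        Arc::new("buffer-1".to_string()),
        Arc::new("buffer-2".to_string()),
    ];

    // `.iter()` yields `&Arc<String>`; `.cloned()` bumps each refcount,
    // exactly like `.map(|r| r.clone())` but without the closure noise.
    let mut suspected: Vec<Arc<String>> = tracked.iter().cloned().collect();

    let mut future_suspected = vec![Arc::new("buffer-3".to_string())];
    // `append` moves every element out of `future_suspected` in one go,
    // the same net effect as `extend(future_suspected.drain(..))`.
    suspected.append(&mut future_suspected);

    assert_eq!(suspected.len(), 3);
    assert!(future_suspected.is_empty());
}
```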
+ } = &*res.clear_mode.read() { non_referenced_resources .texture_views @@ -699,8 +651,8 @@ impl LifetimeTracker { if trackers.samplers.remove_abandoned(id) { log::debug!("Sampler {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroySampler(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroySampler(id.0)); } if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *samplers_locked) { @@ -725,14 +677,15 @@ impl LifetimeTracker { if trackers.buffers.remove_abandoned(id) { log::debug!("Buffer {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBuffer(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBuffer(id.0)); } if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *buffers_locked) { let submit_index = res.info.submission_index(); - if let resource::BufferMapState::Init { stage_buffer, .. } = - &*res.map_state.lock() + if let resource::BufferMapState::Init { + ref stage_buffer, .. + } = *res.map_state.lock() { self.free_resources.buffers.push(stage_buffer.clone()); } @@ -756,8 +709,8 @@ impl LifetimeTracker { if trackers.compute_pipelines.remove_abandoned(id) { log::debug!("Compute pipeline {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyComputePipeline(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyComputePipeline(id.0)); } if let Some(res) = hub @@ -785,8 +738,8 @@ impl LifetimeTracker { if trackers.render_pipelines.remove_abandoned(id) { log::debug!("Render pipeline {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyRenderPipeline(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderPipeline(id.0)); } if let Some(res) = hub @@ -814,8 +767,8 @@ impl LifetimeTracker { if pipeline_layouts_locked.is_unique(id.0).unwrap() { log::debug!("Pipeline layout {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyPipelineLayout(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyPipelineLayout(id.0)); } if let Some(lay) = hub @@ -844,8 +797,8 @@ impl LifetimeTracker { if bind_group_layouts_locked.is_unique(id.0).unwrap() { log::debug!("Bind group layout {:?} will be destroyed", id); #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBindGroupLayout(id)); + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroupLayout(id.0)); } if let Some(lay) = hub .bind_group_layouts @@ -866,7 +819,7 @@ impl LifetimeTracker { if trackers.query_sets.remove_abandoned(id) { log::debug!("Query set {:?} will be destroyed", id); // #[cfg(feature = "trace")] - // trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0))); + // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); if let Some(res) = hub .query_sets .unregister_locked(id.0, &mut *query_sets_locked) diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 60fab2777f..9ef67d9bbb 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -18,13 +18,13 @@ use wgt::{BufferAddress, TextureFormat}; use std::{iter, num::NonZeroU32, ptr}; -pub mod device; pub mod global; mod life; pub mod queue; +pub mod resource; #[cfg(any(feature = "trace", feature = "replay"))] pub mod trace; -pub use 
device::Device; +pub use resource::Device; pub const SHADER_STAGE_COUNT: usize = 3; // Should be large enough for the largest possible texture row. This diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index ec11cf09a9..c4d2b58ba9 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -334,8 +334,7 @@ impl Global { let data_size = data.len() as wgt::BufferAddress; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data_path = trace.make_binary("bin", data); trace.add(Action::WriteBuffer { id: buffer_id, @@ -597,8 +596,7 @@ impl Global { .map_err(|_| DeviceError::Invalid)?; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data_path = trace.make_binary("bin", data); trace.add(Action::WriteTexture { to: *destination, @@ -629,7 +627,7 @@ impl Global { let (hal_copy_size, array_layer_count) = validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; - let (selector, dst_base) = extract_texture_selector(destination, size, &dst)?; + let (selector, dst_base) = extract_texture_selector(destination, size, dst)?; if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -740,7 +738,7 @@ impl Global { let transition = trackers .textures .set_single( - &dst, + dst, destination.texture, selector, hal::TextureUses::COPY_DST, @@ -829,7 +827,7 @@ impl Global { }; unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); + encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); encoder.transition_buffers(iter::once(barrier)); encoder.copy_buffer_to_texture(&staging_buffer.raw, dst_raw, regions); } @@ -1102,10 +1100,17 @@ impl Global { None => continue, }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::Submit( + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::Submit( submit_index, - cmdbuf.data.lock().commands.take().unwrap(), + cmdbuf + .data + .lock() + .as_mut() + .unwrap() + .commands + .take() + .unwrap(), )); } if !cmdbuf.is_finished() { @@ -1128,7 +1133,7 @@ impl Global { // update submission IDs for buffer in cmd_buf_trackers.buffers.used_resources() { let id = buffer.info.id(); - let raw_buf = match &buffer.raw { + let raw_buf = match buffer.raw { Some(ref raw) => raw, None => { return Err(QueueSubmitError::DestroyedBuffer(id.0)); @@ -1154,7 +1159,7 @@ impl Global { } for texture in cmd_buf_trackers.textures.used_resources() { let id = texture.info.id(); - let should_extend = match texture.inner.as_ref().unwrap() { + let should_extend = match *texture.inner.as_ref().unwrap() { TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id.0)); } @@ -1305,7 +1310,7 @@ impl Global { .set_from_usage_scope(&*texture_guard, &used_surface_textures); let texture_barriers = trackers.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(&tex) + pending.into_hal(tex) }); let present = unsafe { baked.encoder.transition_textures(texture_barriers); @@ -1330,7 +1335,7 @@ impl Global { used_surface_textures.set_size(texture_guard.len()); for (&id, texture) in pending_writes.dst_textures.iter() { - match texture.inner.as_ref().unwrap() { + match *texture.inner.as_ref().unwrap() { 
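The `device/mod.rs` hunk above is the visible half of the `device/device.rs` to `device/resource.rs` rename: because the type is re-exported from the parent module, code that names `crate::device::Device` is untouched by the move. A toy single-file sketch of the same layout (module contents and field names invented):

```rust
// The concrete definition moved from `device/device.rs` to
// `device/resource.rs`; the re-export keeps the public path stable.
pub mod device {
    pub mod resource {
        #[derive(Debug, Default)]
        pub struct Device {
            pub label: String,
        }
    }

    // Callers keep writing `device::Device` regardless of which file
    // the struct actually lives in.
    pub use self::resource::Device;
}

fn main() {
    let a = device::Device::default();
    let b = device::resource::Device::default(); // same type, longer path
    println!("{a:?} / {b:?}");
}
```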
TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id)); } @@ -1359,7 +1364,7 @@ impl Global { .set_from_usage_scope(&*texture_guard, &used_surface_textures); let texture_barriers = trackers.textures.drain().map(|pending| { let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(&tex) + pending.into_hal(tex) }); unsafe { diff --git a/wgpu-core/src/device/device.rs b/wgpu-core/src/device/resource.rs similarity index 99% rename from wgpu-core/src/device/device.rs rename to wgpu-core/src/device/resource.rs index 333ea713f1..686fa6b4e9 100644 --- a/wgpu-core/src/device/device.rs +++ b/wgpu-core/src/device/resource.rs @@ -1,3 +1,5 @@ +#[cfg(any(feature = "trace", feature = "replay"))] +use crate::device::trace; use crate::{ binding_model, command, conv, device::life::{LifetimeTracker, WaitIdleError}, @@ -91,7 +93,7 @@ pub struct Device { pub(crate) downlevel: wgt::DownlevelCapabilities, pub(crate) pending_writes: Mutex>>, #[cfg(feature = "trace")] - pub(crate) trace: Option>, + pub(crate) trace: Mutex>, } impl std::fmt::Debug for Device { @@ -215,19 +217,19 @@ impl Device { life_tracker: Mutex::new(life::LifetimeTracker::new()), temp_suspected: Mutex::new(life::SuspectedResources::new()), #[cfg(feature = "trace")] - trace: trace_path.and_then(|path| match trace::Trace::new(path) { + trace: Mutex::new(trace_path.and_then(|path| match trace::Trace::new(path) { Ok(mut trace) => { trace.add(trace::Action::Init { desc: desc.clone(), backend: A::VARIANT, }); - Some(Mutex::new(trace)) + Some(trace) } Err(e) => { log::error!("Unable to start a trace in '{:?}': {:?}", path, e); None } - }), + })), alignments, limits: desc.limits.clone(), features: desc.features, @@ -275,7 +277,7 @@ impl Device { hub, &self.trackers, #[cfg(feature = "trace")] - self.trace.as_ref(), + self.trace.lock().as_mut(), ); life_tracker.triage_mapped(); @@ -2394,7 +2396,7 @@ impl Device { .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); let pipeline_desc = hal::ComputePipelineDescriptor { label: desc.label.borrow_option(), @@ -2717,7 +2719,7 @@ impl Device { .get(pipeline_layout_id) .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; Some(Device::get_introspection_bind_group_layouts( - &pipeline_layout, + pipeline_layout, &*bgl_guard, )) } @@ -2870,7 +2872,7 @@ impl Device { } let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); let pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.borrow_option(), @@ -3050,7 +3052,7 @@ impl Device { impl Device { pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.into_baked(); + let mut baked = cmd_buf.extract_baked_commands(); unsafe { baked.encoder.reset_all(baked.list.into_iter()); } @@ -3083,7 +3085,7 @@ impl Device { life_tracker.cleanup(); #[cfg(feature = "trace")] { - self.trace = None; + *self.trace.lock() = None; } } } diff --git a/wgpu-core/src/device/trace.rs b/wgpu-core/src/device/trace.rs index 57f82c181e..a7c92f302b 100644 --- a/wgpu-core/src/device/trace.rs +++ b/wgpu-core/src/device/trace.rs @@ -47,7 +47,11 @@ pub enum Action<'a> { CreateBuffer(id::BufferId, 
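The queue-submit hunks above also show the match style this patch settles on: dereference the scrutinee (`match *texture.inner.as_ref().unwrap()`) and bind fields with an explicit `ref`, rather than leaning on match ergonomics. Both forms borrow; the explicit one just states it. A compact comparison with an invented enum:

```rust
enum TextureInner {
    Native { raw: Option<String> },
    Surface { raw: String },
}

fn describe(inner: &TextureInner) -> String {
    // Explicit style used in the patch: deref the scrutinee, bind with `ref`.
    // Match ergonomics would let you write `match inner { ... }` and infer
    // the same borrows; the behavior is identical either way.
    match *inner {
        TextureInner::Native { raw: None } => "destroyed".to_string(),
        TextureInner::Native { raw: Some(ref raw) } => format!("native: {raw}"),
        TextureInner::Surface { ref raw } => format!("surface: {raw}"),
    }
}

fn main() {
    let native = TextureInner::Native { raw: Some("tex-0".to_string()) };
    let lost = TextureInner::Native { raw: None };
    let surface = TextureInner::Surface { raw: "swapchain".to_string() };
    println!("{}", describe(&native));
    println!("{}", describe(&lost));
    println!("{}", describe(&surface));
}
```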
crate::resource::BufferDescriptor<'a>), FreeBuffer(id::BufferId), DestroyBuffer(id::BufferId), - CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>), + CreateTexture( + id::TextureId, + Option, + crate::resource::TextureDescriptor<'a>, + ), FreeTexture(id::TextureId), DestroyTexture(id::TextureId), CreateTextureView { diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 6e224511d9..8e89fd171c 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -78,9 +78,9 @@ impl Global { pub fn clear_backend(&self, _dummy: ()) { let hub = A::hub(self); - let mut surfaces_locked = self.surfaces.write(); + let surfaces_locked = self.surfaces.read(); // this is used for tests, which keep the adapter - hub.clear(&mut surfaces_locked, false); + hub.clear(&surfaces_locked, false); } pub fn generate_report(&self) -> GlobalReport { @@ -129,7 +129,7 @@ impl Drop for Global { // destroy hubs before the instance gets dropped #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] { - self.hubs.vulkan.clear(&mut surfaces_locked, true); + self.hubs.vulkan.clear(&surfaces_locked, true); } #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] { @@ -137,15 +137,15 @@ impl Drop for Global { } #[cfg(all(feature = "dx12", windows))] { - self.hubs.dx12.clear(&mut surfaces_locked, true); + self.hubs.dx12.clear(&surfaces_locked, true); } #[cfg(all(feature = "dx11", windows))] { - self.hubs.dx11.clear(&mut surfaces_locked, true); + self.hubs.dx11.clear(&surfaces_locked, true); } #[cfg(feature = "gles")] { - self.hubs.gl.clear(&mut surfaces_locked, true); + self.hubs.gl.clear(&surfaces_locked, true); } // destroy surfaces diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 803e733c42..ef8ce3bfd7 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::{ - device::{device::Device, DeviceDescriptor}, + device::{resource::Device, DeviceDescriptor}, global::Global, hal_api::HalApi, id::{AdapterId, DeviceId, SurfaceId, Valid}, @@ -767,7 +767,6 @@ impl Global { self.surfaces .get(id) .map_err(|_| RequestAdapterError::InvalidSurface(id)) - .map(|v| v) }) .transpose()?; let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index aeb7216f6f..c69ab3a77e 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -1,3 +1,5 @@ +#[cfg(any(feature = "trace", feature = "replay"))] +use crate::device::trace; use crate::{ binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, command::ColorAttachmentError, @@ -54,10 +56,8 @@ impl Drop for ShaderModule { fn drop(&mut self) { if let Some(raw) = self.raw.take() { #[cfg(feature = "trace")] - if let Some(ref trace) = self.device.trace { - trace - .lock() - .add(trace::Action::DestroyShaderModule(self.info.id())); + if let Some(ref mut trace) = *self.device.trace.lock() { + trace.add(trace::Action::DestroyShaderModule(self.info.id().0)); } unsafe { use hal::Device; diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 38c0a6d6ae..c7f4ab5a82 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -129,15 +129,15 @@ impl Global { let (device, config) = match surface.presentation.lock().as_ref() { Some(present) => { - let device = hub.devices.get(present.device_id.0).unwrap().clone(); + let device = hub.devices.get(present.device_id.0).unwrap(); (device, present.config.clone()) } None => 
return Err(SurfaceError::NotConfigured), }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::GetSurfaceTexture { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::GetSurfaceTexture { id: fid.id(), parent_id: surface_id, }); @@ -281,11 +281,11 @@ impl Global { None => return Err(SurfaceError::NotConfigured), }; - let device = hub.devices.get(present.device_id.0).unwrap().clone(); + let device = hub.devices.get(present.device_id.0).unwrap(); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::Present(surface_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::Present(surface_id)); } let result = { @@ -306,8 +306,11 @@ impl Global { if let Some(texture) = texture { if let Ok(mut texture) = Arc::try_unwrap(texture) { let mut clear_mode = texture.clear_mode.write(); - if let resource::TextureClearMode::RenderPass { clear_views, .. } = - &mut *clear_mode + let clear_mode = &mut *clear_mode; + if let resource::TextureClearMode::RenderPass { + ref mut clear_views, + .. + } = *clear_mode { clear_views.clear(); } @@ -382,11 +385,11 @@ impl Global { None => return Err(SurfaceError::NotConfigured), }; - let device = hub.devices.get(present.device_id.0).unwrap().clone(); + let device = hub.devices.get(present.device_id.0).unwrap(); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::DiscardSurfaceTexture(surface_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::DiscardSurfaceTexture(surface_id)); } { diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 6d5c450050..2da9e35189 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -76,10 +76,7 @@ impl, F: IdentityHandlerFactory> Regist } } pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { - self.storage - .read() - .try_get(id) - .map(|o| o.map(|v| v.clone())) + self.storage.read().try_get(id).map(|o| o.cloned()) } pub(crate) fn get(&self, id: I) -> Result, InvalidId> { self.storage.read().get(id).map(|v| v.clone()) @@ -115,7 +112,7 @@ impl, F: IdentityHandlerFactory> Regist if label.is_empty() { format!("<{}-{:?}>", type_name, id.unzip()) } else { - label.to_string() + label } } Err(_) => format!( diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 442f061cef..54abe78a2b 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -195,7 +195,7 @@ enum BufferMapCallbackInner { impl Debug for BufferMapCallbackInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { + match *self { BufferMapCallbackInner::Rust { callback: _ } => f.debug_struct("Rust").finish(), BufferMapCallbackInner::C { inner: _ } => f.debug_struct("C").finish(), } @@ -442,7 +442,7 @@ pub(crate) enum TextureInner { impl TextureInner { pub fn as_raw(&self) -> Option<&A::Texture> { match *self { - Self::Native { raw: Some(ref tex) } => Some(&tex), + Self::Native { raw: Some(ref tex) } => Some(tex), Self::Native { raw: None } => None, Self::Surface { ref raw, .. } => Some(raw.borrow()), } @@ -479,14 +479,21 @@ impl Drop for Texture { fn drop(&mut self) { use hal::Device; let mut clear_mode = self.clear_mode.write(); - if let TextureClearMode::RenderPass { clear_views, .. } = &mut *clear_mode { + let clear_mode = &mut *clear_mode; + if let TextureClearMode::RenderPass { + ref mut clear_views, + .. 
+ } = *clear_mode + { clear_views.clear(); } - if let Some(inner) = self.inner.take() { - if let TextureInner::Native { raw: Some(raw) } = inner { - unsafe { - self.device.raw.as_ref().unwrap().destroy_texture(raw); - } + if self.inner.is_none() { + return; + } + let inner = self.inner.take().unwrap(); + if let TextureInner::Native { raw: Some(raw) } = inner { + unsafe { + self.device.raw.as_ref().unwrap().destroy_texture(raw); } } } @@ -499,7 +506,7 @@ impl Texture { mip_level: u32, depth_or_layer: u32, ) -> &'a A::TextureView { - match clear_mode { + match *clear_mode { TextureClearMode::BufferCopy => { panic!("Given texture is cleared with buffer copies, not render passes") } @@ -588,7 +595,7 @@ impl Global { let surface = self.surfaces.get(id).ok(); let hal_surface = surface .as_ref() - .and_then(|surface| A::get_surface(&surface)) + .and_then(|surface| A::get_surface(surface)) .map(|surface| &*surface.raw); hal_surface_callback(hal_surface) diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 2022fa0015..09875af3f0 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -72,7 +72,7 @@ impl BufferBindGroupState { /// Returns a list of all buffers tracked. May contain duplicates. pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.buffers.iter().map(|(_, buffer, _)| buffer) + self.buffers.iter().map(|&(_id, ref buffer, _u)| buffer) } /// Adds the given resource with the given state. @@ -86,7 +86,7 @@ impl BufferBindGroupState { self.buffers.push((Valid(id), buffer.clone(), state)); - Some(&buffer) + Some(buffer) } } @@ -248,7 +248,7 @@ impl BufferUsageScope { )?; } - Ok(&buffer) + Ok(buffer) } } @@ -342,7 +342,7 @@ impl BufferTracker { None, ResourceMetadataProvider::Direct { epoch, - resource: Cow::Owned(resource.clone()), + resource: Cow::Owned(resource), }, ) } @@ -389,7 +389,7 @@ impl BufferTracker { strict_assert!(self.temp.len() <= 1); - Some((&buffer, self.temp.pop())) + Some((buffer, self.temp.pop())) } /// Sets the given state for all buffers in the given tracker. diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 56bbf5cd2f..4050bc0ff6 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -210,7 +210,7 @@ impl> ResourceMetadataProvider<'_, A, I, T pub(super) unsafe fn get_own(self, index: usize) -> (Epoch, Arc) { match self { ResourceMetadataProvider::Direct { epoch, resource } => { - (epoch, resource.into_owned().clone()) + (epoch, resource.into_owned()) } ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 989ff69d7e..2ecf7f016d 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -43,7 +43,9 @@ impl> StatelessBindGroupSate { /// Returns a list of all resources tracked. May contain duplicates. pub fn used_resources(&self) -> impl Iterator> + '_ { - self.resources.iter().map(|(_, resource)| resource.clone()) + self.resources + .iter() + .map(|&(_, ref resource)| resource.clone()) } /// Adds the given resource. @@ -52,7 +54,7 @@ impl> StatelessBindGroupSate { self.resources.push((Valid(id), resource.clone())); - Some(&resource) + Some(resource) } } @@ -134,7 +136,7 @@ impl> StatelessTracker { self.metadata.insert(index, epoch, resource.clone()); } - Some(&resource) + Some(resource) } /// Adds the given resources from the given tracker. 
diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 8f7355d9f2..90fca95cd7 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -148,24 +148,23 @@ impl ComplexTextureState { } } +#[derive(Debug)] +struct TextureBindGroupStateData { + id: Valid, + selector: Option, + texture: Arc>, + usage: TextureUses, +} + /// Stores all the textures that a bind group stores. #[derive(Debug)] pub(crate) struct TextureBindGroupState { - textures: Vec<( - Valid, - Option, - Arc>, - TextureUses, - )>, - - _phantom: PhantomData, + textures: Vec>, } impl TextureBindGroupState { pub fn new() -> Self { Self { textures: Vec::new(), - - _phantom: PhantomData, } } @@ -174,13 +173,12 @@ impl TextureBindGroupState { /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. pub(crate) fn optimize(&mut self) { - self.textures - .sort_unstable_by_key(|&(id, _, _, _)| id.0.unzip().0); + self.textures.sort_unstable_by_key(|v| v.id.0.unzip().0); } /// Returns a list of all textures tracked. May contain duplicates. pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.textures.iter().map(|(_, _, texture, _)| texture) + self.textures.iter().map(|v| &v.texture) } /// Adds the given resource with the given state. @@ -193,10 +191,14 @@ impl TextureBindGroupState { ) -> Option<&'a Texture> { let resource = storage.get(id).ok()?; - self.textures - .push((Valid(id), selector, resource.clone(), state)); + self.textures.push(TextureBindGroupStateData { + id: Valid(id), + selector, + texture: resource.clone(), + usage: state, + }); - Some(&resource) + Some(resource) } } @@ -330,8 +332,8 @@ impl TextureUsageScope { storage: &Storage, TextureId>, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref selector, ref _texture, state) in &bind_group.textures { - unsafe { self.merge_single(storage, id, selector.clone(), state)? }; + for t in &bind_group.textures { + unsafe { self.merge_single(storage, t.id, t.selector.clone(), t.usage)? }; } Ok(()) @@ -490,7 +492,7 @@ impl TextureTracker { None, ResourceMetadataProvider::Direct { epoch, - resource: Cow::Owned(resource.clone()), + resource: Cow::Owned(resource), }, ) }; @@ -656,8 +658,8 @@ impl TextureTracker { self.set_size(incoming_size); } - for &(id, _, _, _) in bind_group_state.textures.iter() { - let (index32, _, _) = id.0.unzip(); + for t in bind_group_state.textures.iter() { + let (index32, _, _) = t.id.0.unzip(); let index = index32 as usize; scope.tracker_assert_in_bounds(index); diff --git a/wgpu-hal/examples/halmark/main.rs b/wgpu-hal/examples/halmark/main.rs index 2810d160b1..fc6bac11b6 100644 --- a/wgpu-hal/examples/halmark/main.rs +++ b/wgpu-hal/examples/halmark/main.rs @@ -98,7 +98,7 @@ impl Example { dx12_shader_compiler: wgt::Dx12Compiler::Fxc, }; let instance = unsafe { A::Instance::init(&instance_desc)? 
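The tracker change above swaps a `Vec` of 4-tuples for a small named struct (`TextureBindGroupStateData`), which is what lets the later call sites say `t.id` and `t.usage` instead of positional destructuring. A stripped-down sketch of the same trade (the `Texture` type, `usage: u32`, and labels are placeholders, not the real tracker types):

```rust
use std::ops::Range;
use std::sync::Arc;

struct Texture {
    label: String,
}

// One field per meaning, instead of (u64, Option<Range<u32>>, Arc<Texture>, u32).
struct TextureBindGroupEntry {
    id: u64,
    selector: Option<Range<u32>>,
    texture: Arc<Texture>,
    usage: u32,
}

fn main() {
    let mut entries = vec![
        TextureBindGroupEntry {
            id: 7,
            selector: None,
            texture: Arc::new(Texture { label: "depth".to_string() }),
            usage: 1,
        },
        TextureBindGroupEntry {
            id: 3,
            selector: Some(0..1),
            texture: Arc::new(Texture { label: "color".to_string() }),
            usage: 2,
        },
    ];

    // Reads as prose, unlike `sort_unstable_by_key(|&(id, _, _, _)| id)`.
    entries.sort_unstable_by_key(|e| e.id);
    let labels: Vec<&str> = entries.iter().map(|e| e.texture.label.as_str()).collect();
    assert_eq!(labels, ["color", "depth"]);
    assert_eq!(entries[0].usage, 2);
    assert!(entries[1].selector.is_none());
}
```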
}; - let mut surface = unsafe { + let surface = unsafe { instance .create_surface(window.raw_display_handle(), window.raw_window_handle()) .unwrap() @@ -116,7 +116,7 @@ impl Example { unsafe { adapter.surface_capabilities(&surface) }.ok_or(hal::InstanceError)?; log::info!("Surface caps: {:#?}", surface_caps); - let hal::OpenDevice { device, mut queue } = unsafe { + let hal::OpenDevice { device, queue } = unsafe { adapter .open(wgt::Features::empty(), &wgt::Limits::default()) .unwrap() @@ -717,7 +717,7 @@ impl Example { None }; self.queue.submit(&[&cmd_buf], fence_param).unwrap(); - self.queue.present(&mut self.surface, surface_tex).unwrap(); + self.queue.present(&self.surface, surface_tex).unwrap(); ctx.used_cmd_bufs.push(cmd_buf); ctx.used_views.push(surface_tex_view); }; diff --git a/wgpu-hal/examples/raw-gles.rs b/wgpu-hal/examples/raw-gles.rs index d9dfc492fa..06a490fa6c 100644 --- a/wgpu-hal/examples/raw-gles.rs +++ b/wgpu-hal/examples/raw-gles.rs @@ -122,7 +122,7 @@ fn main() {} fn fill_screen(exposed: &hal::ExposedAdapter, width: u32, height: u32) { use hal::{Adapter as _, CommandEncoder as _, Device as _, Queue as _}; - let mut od = unsafe { + let od = unsafe { exposed .adapter .open(wgt::Features::empty(), &wgt::Limits::downlevel_defaults()) diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index d0bd951e1f..af2b03960a 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -136,8 +136,9 @@ impl Context { ) -> Texture { let descriptor = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec()); let global = &self.0; - let (id, error) = - unsafe { global.create_texture_from_hal::(hal_texture, device.id, &descriptor, ()) }; + let (id, error) = unsafe { + global.create_texture_from_hal::(hal_texture, device.id, &descriptor, ()) + }; if let Some(cause) = error { self.handle_error( &device.error_sink, @@ -1277,7 +1278,7 @@ impl crate::Context for Context { let (id, error) = wgc::gfx_select!(device => global.device_create_texture( *device, &wgt_desc, - (), () + (), Some(()) )); if let Some(cause) = error { self.handle_error( From a9b3bcb2c5a7aa7ebff09aabbbe1a82b1979059d Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 1 Apr 2023 18:13:16 +0200 Subject: [PATCH 004/132] Removing unneeded replay feature --- wgpu-core/src/device/global.rs | 2 +- wgpu-core/src/device/resource.rs | 2 +- wgpu-core/src/pipeline.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 650ebfd2fc..6bdef8da70 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1,4 +1,4 @@ -#[cfg(any(feature = "trace", feature = "replay"))] +#[cfg(feature = "trace")] use crate::device::trace; use crate::{ binding_model, command, conv, diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 686fa6b4e9..8a9d414d38 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1,4 +1,4 @@ -#[cfg(any(feature = "trace", feature = "replay"))] +#[cfg(feature = "trace")] use crate::device::trace; use crate::{ binding_model, command, conv, diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index c69ab3a77e..5b0068253f 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -1,4 +1,4 @@ -#[cfg(any(feature = "trace", feature = "replay"))] +#[cfg(feature = "trace")] use crate::device::trace; use crate::{ binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, From 
17b8e712319c7d9683c3b62aac28395ca40c9187 Mon Sep 17 00:00:00 2001 From: Elabajaba Date: Sat, 1 Apr 2023 23:21:03 -0400 Subject: [PATCH 005/132] string::new doesn't take any arguments --- wgpu-core/src/binding_model.rs | 2 +- wgpu-core/src/pipeline.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 9720e235e4..f0cfb84abc 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -479,7 +479,7 @@ impl Resource for BindGroupLayout { #[cfg(debug_assertions)] return self.label.clone(); #[cfg(not(debug_assertions))] - return String::new(""); + return String::new(); } } diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 5b0068253f..02f3c019eb 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -78,7 +78,7 @@ impl Resource for ShaderModule { #[cfg(debug_assertions)] return self.label.clone(); #[cfg(not(debug_assertions))] - return String::new(""); + return String::new(); } } From c7e1c549964d09411517a51280e02cf31d974a7b Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 2 Apr 2023 10:20:17 +0200 Subject: [PATCH 006/132] Changed doc to align to moved files --- wgpu-core/src/command/bundle.rs | 4 ++-- wgpu-core/src/hub.rs | 18 +++++++++--------- wgpu-core/src/id.rs | 4 ++-- wgpu-core/src/lib.rs | 4 ++-- wgpu-core/src/resource.rs | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 46d1c41bbb..37e4728bde 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -71,8 +71,8 @@ called. It goes through the commands and issues them into the native command buffer. Thanks to isolation, it doesn't track any bind group invalidations or index format changes. -[Gdcrbe]: crate::hub::Global::device_create_render_bundle_encoder -[Grbef]: crate::hub::Global::render_bundle_encoder_finish +[Gdcrbe]: crate::global::Global::device_create_render_bundle_encoder +[Grbef]: crate::global::Global::render_bundle_encoder_finish [wrpeb]: crate::command::render_ffi::wgpu_render_pass_execute_bundles !*/ diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index b349d4ca37..a3749c50c2 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -7,7 +7,7 @@ resources of type `R`. For example, [`id::DeviceId`] is an alias for of course `Debug`. Each `Id` contains not only an index for the resource it denotes but -also a [`Backend`] indicating which `wgpu` backend it belongs to. You +also a Backend indicating which `wgpu` backend it belongs to. You can use the [`gfx_select`] macro to dynamically dispatch on an id's backend to a function specialized at compile time for a specific backend. See that macro's documentation for details. @@ -57,13 +57,13 @@ itself to choose ids always pass `()`. In either case, the id ultimately assigned is returned as the first element of the tuple. Producing true identifiers from `id_in` values is the job of an -[`IdentityHandler`] implementation, which has an associated type +[`crate::identity::IdentityHandler`] implementation, which has an associated type [`Input`] saying what type of `id_in` values it accepts, and a [`process`] method that turns such values into true identifiers of type `I`. There are two kinds of `IdentityHandler`s: - Users that want `wgpu_core` to assign ids generally use - [`IdentityManager`] ([wrapped in a mutex]). 
Its `Input` type is + [`crate::identity::IdentityManager`] ([wrapped in a mutex]). Its `Input` type is `()`, and it tracks assigned ids and generation numbers as necessary. (This is what `wgpu` does.) @@ -76,16 +76,16 @@ type `I`. There are two kinds of `IdentityHandler`s: but passes the rest of the id's content through unchanged. Because an `IdentityHandler` can only create ids for a single -resource type `I`, constructing a [`Global`] entails constructing a +resource type `I`, constructing a [`crate::global::Global`] entails constructing a separate `IdentityHandler` for each resource type `I` that the `Global` will manage: an `IdentityHandler`, an `IdentityHandler`, and so on. -The [`Global::new`] function could simply take a large collection of +The [`crate::global::Global::new`] function could simply take a large collection of `IdentityHandler` implementations as arguments, but that would be ungainly. Instead, `Global::new` expects a `factory` argument that implements the [`GlobalIdentityHandlerFactory`] trait, which extends -[`IdentityHandlerFactory`] for each resource id type `I`. This +[`crate::identity::IdentityHandlerFactory`] for each resource id type `I`. This trait, in turn, has a `spawn` method that constructs an `IdentityHandler` for the `Global` to use. @@ -104,7 +104,7 @@ Thus, its `id_in` type is: <>::Filter as IdentityHandler>::Input ``` -The [`Input`] type is an alias for this construction. +The [`crate::identity::Input`] type is an alias for this construction. ## Id allocation and streaming @@ -141,8 +141,8 @@ as much, allowing subsequent operations using that id to be properly flagged as errors as well. [`gfx_select`]: crate::gfx_select -[`Input`]: IdentityHandler::Input -[`process`]: IdentityHandler::process +[`Input`]: crate::identity::IdentityHandler::Input +[`process`]: crate::identity::IdentityHandler::process [`Id`]: crate::id::Id [wrapped in a mutex]: trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E [WebGPU]: https://www.w3.org/TR/webgpu/ diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 9c654e3ca9..6aa2aa2872 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -42,10 +42,10 @@ type Dummy = hal::api::Empty; /// `X` type with the resource type `X`, for some specific backend /// `A`. /// -/// [`Global`]: crate::hub::Global +/// [`Global`]: crate::global::Global /// [`Hub`]: crate::hub::Hub /// [`Hub`]: crate::hub::Hub -/// [`Storage`]: crate::hub::Storage +/// [`Storage`]: crate::storage::Storage /// [`Texture`]: crate::resource::Texture /// [`Index`]: std::ops::Index /// [`IndexMut`]: std::ops::IndexMut diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index 83158f8b2b..95c11d167a 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -209,7 +209,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// identifiers to select backends dynamically, even though many `wgpu_core` /// methods are compiled and optimized for a specific back end. /// -/// This macro is typically used to call methods on [`wgpu_core::hub::Global`], +/// This macro is typically used to call methods on [`wgpu_core::global::Global`], /// many of which take a single `hal::Api` type parameter. For example, to /// create a new buffer on the device indicated by `device_id`, one would say: /// @@ -235,7 +235,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// dynamically dispatch to the right specialization based on the resource's id. 
/// /// [`wgpu_types::Backend`]: wgt::Backend -/// [`wgpu_core::hub::Global`]: crate::hub::Global +/// [`wgpu_core::global::Global`]: crate::global::Global /// [`Id`]: id::Id #[macro_export] macro_rules! gfx_select { diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 54abe78a2b..5ac00fd9ea 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -45,8 +45,8 @@ use std::{ /// - It may be used by commands sent to the GPU that have not yet /// finished execution. /// -/// [`Device`]: device::Device -/// [`Buffer`]: resource::Buffer +/// [`Device`]: crate::device::resource::Device +/// [`Buffer`]: crate::resource::Buffer #[derive(Debug)] pub struct ResourceInfo { id: RwLock>>, From ecd0cc2c168ac81b6d9b50654780a5df10bffc8d Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 2 Apr 2023 20:50:20 +0200 Subject: [PATCH 007/132] Correct texture view creation for render pass --- wgpu-core/src/device/global.rs | 88 ++++++++++++++++---------------- wgpu-core/src/device/life.rs | 7 ++- wgpu-core/src/device/resource.rs | 15 ++++-- wgpu-core/src/global.rs | 2 +- wgpu-core/src/hub.rs | 2 +- wgpu-core/src/resource.rs | 2 + wgpu-core/src/track/buffer.rs | 4 +- wgpu-core/src/track/metadata.rs | 4 +- wgpu-core/src/track/stateless.rs | 4 +- wgpu-core/src/track/texture.rs | 4 +- wgpu/src/backend/direct.rs | 5 +- 11 files changed, 73 insertions(+), 64 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 6bdef8da70..bdbc3d8753 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -584,53 +584,52 @@ impl Global { let (id, resource) = fid.assign(texture); log::info!("Created texture {:?} with {:?}", id, desc); - if idtv_in.is_some() { - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; + if let TextureClearMode::RenderPass { + ref mut clear_views, + is_color: _, + } = *resource.clear_mode.write() + { + if idtv_in.is_some() { + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; + + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let descriptor = resource::TextureViewDescriptor { + label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), + format: Some(desc.format), + dimension: Some(dimension), + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + + let texture_view = device + .create_texture_view(&resource, id.0, &descriptor) + .unwrap(); + let fid_tv = if fid_tv.is_some() { + fid_tv.take().unwrap() + } else { + hub.texture_views.prepare(idtv_in.clone().unwrap()) + }; + let (tv_id, texture_view) = fid_tv.assign(texture_view); + log::info!("Created texture view {:?} for texture {:?}", tv_id, id); - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let descriptor = resource::TextureViewDescriptor { - label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), - format: Some(desc.format), - dimension: Some(dimension), - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: 
mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - array_layer_count: Some(1), - }, - }; - - let texture_view = device - .create_texture_view(&resource, id.0, &descriptor) - .unwrap(); - let fid_tv = if fid_tv.is_some() { - fid_tv.take().unwrap() - } else { - hub.texture_views.prepare(idtv_in.clone().unwrap()) - }; - let (tv_id, texture_view) = fid_tv.assign(texture_view); - log::info!("Created texture view {:?} for texture {:?}", tv_id, id); - - let texture_clear_mode = &mut *resource.clear_mode.write(); - if let &mut TextureClearMode::RenderPass { - ref mut clear_views, - is_color: _, - } = texture_clear_mode - { clear_views.push(texture_view.clone()); - } - device - .trackers - .lock() - .views - .insert_single(tv_id, texture_view); + device + .trackers + .lock() + .views + .insert_single(tv_id, texture_view); + } } } } @@ -2333,6 +2332,7 @@ impl Global { trackers .buffers .set_single(&*buffer_guard, buffer_id, internal_use); + //TODO: Check if draining ALL buffers is correct! trackers.buffers.drain(); } diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index f2fb9800f4..3488c029ef 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -592,8 +592,11 @@ impl LifetimeTracker { .texture_views .unregister_locked(id.0, &mut *texture_views_locked) { - let parent_texture = hub.textures.get(res.parent_id.0).unwrap(); - self.suspected_resources.textures.push(parent_texture); + if let Some(parent_texture) = res.parent.as_ref() { + self.suspected_resources + .textures + .push(parent_texture.clone()); + } let submit_index = res.info.submission_index(); self.active .iter_mut() diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 8a9d414d38..b5fd446bfa 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -20,7 +20,7 @@ use crate::{ pipeline, resource::ResourceInfo, resource::{ - self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView, + self, Buffer, QuerySet, Resource, Sampler, Texture, TextureInner, TextureView, TextureViewNotRenderableReason, }, storage::Storage, @@ -1014,6 +1014,7 @@ impl Device { Ok(TextureView { raw: Some(raw), + parent: None, parent_id: id::Valid(texture_id), device: self.clone(), desc: resource::HalTextureViewDescriptor { @@ -1031,7 +1032,7 @@ impl Device { pub(crate) fn create_texture_view( self: &Arc, - texture: &Texture, + texture: &Arc>, texture_id: id::TextureId, desc: &resource::TextureViewDescriptor, ) -> Result, resource::CreateTextureViewError> { @@ -1042,14 +1043,20 @@ impl Device { .as_raw() .ok_or(resource::CreateTextureViewError::InvalidTexture)?; - self.create_texture_inner_view( + let mut result = self.create_texture_inner_view( texture_raw, texture_id, &texture.desc, &texture.hal_usage, &texture.format_features, desc, - ) + ); + if let TextureInner::Native { .. 
} = *texture.inner.as_ref().unwrap() { + if let Ok(ref mut texture_view) = result { + texture_view.parent = Some(texture.clone()); + } + } + result } pub(crate) fn create_sampler( diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 8e89fd171c..b57cfcc1f6 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -133,7 +133,7 @@ impl Drop for Global { } #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] { - self.hubs.metal.clear(&mut surface_guard, true); + self.hubs.metal.clear(&surfaces_locked, true); } #[cfg(all(feature = "dx12", windows))] { diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index a3749c50c2..c5501da14f 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -242,7 +242,7 @@ impl Hub { use hal::Surface; let mut devices = self.devices.write(); - for element in devices.map.iter_mut() { + for element in devices.map.iter() { if let Element::Occupied(ref device, _) = *element { device.prepare_to_die(); } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 5ac00fd9ea..c45ac84e1a 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -746,6 +746,8 @@ pub enum TextureViewNotRenderableReason { #[derive(Debug)] pub struct TextureView { pub(crate) raw: Option, + // if it's a surface texture - it's none + pub(crate) parent: Option>>, // The parent's refcount is held alive, but the parent may still be deleted // if it's a surface texture. TODO: make this cleaner. pub(crate) parent_id: Valid, diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 09875af3f0..f3e25b8cd4 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -548,8 +548,8 @@ impl BufferTracker { if self.metadata.contains_unchecked(index) { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count == 1 { + //3 ref count: Registry, Device Tracker and suspected resource itself + if existing_epoch == epoch && existing_ref_count <= 3 { self.metadata.remove(index); return true; } diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 4050bc0ff6..535de4e2b4 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -209,9 +209,7 @@ impl> ResourceMetadataProvider<'_, A, I, T #[inline(always)] pub(super) unsafe fn get_own(self, index: usize) -> (Epoch, Arc) { match self { - ResourceMetadataProvider::Direct { epoch, resource } => { - (epoch, resource.into_owned()) - } + ResourceMetadataProvider::Direct { epoch, resource } => (epoch, resource.into_owned()), ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); (unsafe { *metadata.epochs.get_unchecked(index) }, { diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 2ecf7f016d..f2375f5e4b 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -185,8 +185,8 @@ impl> StatelessTracker { if self.metadata.contains_unchecked(index) { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count == 1 { + //3 ref count: Registry, Device Tracker and suspected resource itself + if existing_epoch == epoch && existing_ref_count <= 3 { self.metadata.remove(index); return true; } diff --git a/wgpu-core/src/track/texture.rs 
b/wgpu-core/src/track/texture.rs index 90fca95cd7..0c9868c9df 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -742,8 +742,8 @@ impl TextureTracker { if self.metadata.contains_unchecked(index) { let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count == 1 { + //3 ref count: Registry, Device Tracker and suspected resource itself + if existing_epoch == epoch && existing_ref_count <= 3 { self.start_set.complex.remove(&index32); self.end_set.complex.remove(&index32); diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index af2b03960a..36cdab0622 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -136,9 +136,8 @@ impl Context { ) -> Texture { let descriptor = desc.map_label_and_view_formats(|l| l.map(Borrowed), |v| v.to_vec()); let global = &self.0; - let (id, error) = unsafe { - global.create_texture_from_hal::(hal_texture, device.id, &descriptor, ()) - }; + let (id, error) = + unsafe { global.create_texture_from_hal::(hal_texture, device.id, &descriptor, ()) }; if let Some(cause) = error { self.handle_error( &device.error_sink, From a4b83a51d2640aba389e5de85f4e3519aadc78d2 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Tue, 4 Apr 2023 19:26:03 +0200 Subject: [PATCH 008/132] Fixing staging buffer issue --- wgpu-core/src/device/life.rs | 11 +----- wgpu-core/src/device/queue.rs | 73 +++++++++++++++++++++++------------ wgpu-core/src/resource.rs | 2 +- 3 files changed, 51 insertions(+), 35 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 3488c029ef..bce266472d 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -12,7 +12,7 @@ use crate::{ id, identity::GlobalIdentityHandlerFactory, pipeline::{ComputePipeline, RenderPipeline}, - resource::{self, Buffer, QuerySet, Resource, Sampler, StagingBuffer, Texture, TextureView}, + resource::{self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView}, track::{BindGroupStates, RenderBundleScope, Tracker}, SubmissionIndex, }; @@ -113,7 +113,6 @@ impl SuspectedResources { #[derive(Debug)] struct NonReferencedResources { buffers: Vec>>, - staging_buffers: Vec>>, textures: Vec>>, texture_views: Vec>>, samplers: Vec>>, @@ -129,7 +128,6 @@ impl NonReferencedResources { fn new() -> Self { Self { buffers: Vec::new(), - staging_buffers: Vec::new(), textures: Vec::new(), texture_views: Vec::new(), samplers: Vec::new(), @@ -144,7 +142,6 @@ impl NonReferencedResources { fn extend(&mut self, other: Self) { self.buffers.extend(other.buffers); - self.staging_buffers.extend(other.staging_buffers); self.textures.extend(other.textures); self.texture_views.extend(other.texture_views); self.samplers.extend(other.samplers); @@ -161,10 +158,6 @@ impl NonReferencedResources { profiling::scope!("destroy_buffers"); self.buffers.clear(); } - if !self.staging_buffers.is_empty() { - profiling::scope!("destroy_staging_buffers"); - self.staging_buffers.clear(); - } if !self.textures.is_empty() { profiling::scope!("destroy_textures"); self.textures.clear(); @@ -343,7 +336,6 @@ impl LifetimeTracker { for res in temp_resources { match res { TempResource::Buffer(raw) => last_resources.buffers.push(raw), - TempResource::StagingBuffer(raw) => last_resources.staging_buffers.push(raw), TempResource::Texture(raw, views) => { last_resources.textures.push(raw); last_resources.texture_views.extend(views); @@ -443,7 
+435,6 @@ impl LifetimeTracker { .map_or(&mut self.free_resources, |a| &mut a.last_resources); match temp_resource { TempResource::Buffer(raw) => resources.buffers.push(raw), - TempResource::StagingBuffer(raw) => resources.staging_buffers.push(raw), TempResource::Texture(raw, views) => { resources.texture_views.extend(views); resources.textures.push(raw); diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index c4d2b58ba9..1ac4052d7d 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -12,7 +12,7 @@ use crate::{ hal_api::HalApi, id, identity::{GlobalIdentityHandlerFactory, Input}, - init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, + init_tracker::{has_copy_partial_init_tracker_coverage, BufferInitTracker, TextureInitRange}, resource::{ Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, StagingBuffer, Texture, TextureInner, TextureView, @@ -21,6 +21,7 @@ use crate::{ }; use hal::{CommandEncoder as _, Device as _, Queue as _}; +use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; use std::{ iter, mem, ptr, @@ -28,6 +29,8 @@ use std::{ }; use thiserror::Error; +use super::Device; + /// Number of command buffers that we generate from the same pool /// for the write_xxx commands, before the pool is recycled. /// @@ -112,7 +115,6 @@ pub struct WrappedSubmissionIndex { #[derive(Debug)] pub enum TempResource { Buffer(Arc>), - StagingBuffer(Arc>), Texture(Arc>, SmallVec<[Arc>; 1]>), } @@ -184,9 +186,18 @@ impl PendingWrites { self.temp_resources.push(resource); } - fn consume(&mut self, buffer: Arc>) { + fn consume(&mut self, device: &Arc>, buffer: Arc>) { self.temp_resources - .push(TempResource::StagingBuffer(buffer)); + .push(TempResource::Buffer(Arc::new(Buffer:: { + raw: buffer.raw.lock().take(), + device: device.clone(), + usage: wgt::BufferUsages::empty(), + size: buffer.size, + initialization_status: RwLock::new(BufferInitTracker::new(buffer.size)), + sync_mapped_writes: Mutex::new(None), + map_state: Mutex::new(crate::resource::BufferMapState::Idle), + info: ResourceInfo::new(&buffer.info.label), + }))); } #[must_use] @@ -259,7 +270,7 @@ fn prepare_staging_buffer( let mapping = unsafe { device.map_buffer(&buffer, 0..size) }?; let staging_buffer = StagingBuffer { - raw: Arc::new(buffer), + raw: Mutex::new(Some(buffer)), size, info: ResourceInfo::new(""), is_coherent: mapping.is_coherent, @@ -271,9 +282,14 @@ fn prepare_staging_buffer( impl StagingBuffer { unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> { if !self.is_coherent { - unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)) }; + unsafe { + device.flush_mapped_ranges( + self.raw.lock().as_ref().unwrap(), + iter::once(0..self.size), + ) + }; } - unsafe { device.unmap_buffer(&self.raw)? }; + unsafe { device.unmap_buffer(self.raw.lock().as_ref().unwrap())? 
}; Ok(()) } } @@ -365,7 +381,7 @@ impl Global { .lock() .as_mut() .unwrap() - .consume(Arc::new(staging_buffer)); + .consume(&device, Arc::new(staging_buffer)); return Err(flush_error.into()); } @@ -381,7 +397,7 @@ impl Global { .lock() .as_mut() .unwrap() - .consume(Arc::new(staging_buffer)); + .consume(&device, Arc::new(staging_buffer)); result } @@ -441,7 +457,7 @@ impl Global { .lock() .as_mut() .unwrap() - .consume(staging_buffer); + .consume(&device, staging_buffer); return Err(flush_error.into()); } @@ -457,7 +473,7 @@ impl Global { .lock() .as_mut() .unwrap() - .consume(staging_buffer); + .consume(&device, staging_buffer); result } @@ -514,7 +530,7 @@ impl Global { fn queue_write_staging_buffer_impl( &self, - device: &super::Device, + device: &Device, staging_buffer: &StagingBuffer, buffer_id: id::BufferId, buffer_offset: u64, @@ -547,8 +563,9 @@ impl Global { dst_offset: buffer_offset, size, }); + let inner_buffer = staging_buffer.raw.lock(); let barriers = iter::once(hal::BufferBarrier { - buffer: staging_buffer.raw.as_ref(), + buffer: inner_buffer.as_ref().unwrap(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }) .chain(transition.map(|pending| pending.into_hal(dst))); @@ -557,7 +574,11 @@ impl Global { let encoder = pending_writes.activate(); unsafe { encoder.transition_buffers(barriers); - encoder.copy_buffer_to_buffer(&staging_buffer.raw, dst_raw, region.into_iter()); + encoder.copy_buffer_to_buffer( + inner_buffer.as_ref().unwrap(), + dst_raw, + region.into_iter(), + ); } pending_writes @@ -802,7 +823,7 @@ impl Global { } if let Err(e) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { - pending_writes.consume(Arc::new(staging_buffer)); + pending_writes.consume(&device, Arc::new(staging_buffer)); return Err(e.into()); } @@ -821,18 +842,22 @@ impl Global { size: hal_copy_size, } }); - let barrier = hal::BufferBarrier { - buffer: staging_buffer.raw.as_ref(), - usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, - }; - unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); - encoder.transition_buffers(iter::once(barrier)); - encoder.copy_buffer_to_texture(&staging_buffer.raw, dst_raw, regions); + { + let inner_buffer = staging_buffer.raw.lock(); + let barrier = hal::BufferBarrier { + buffer: inner_buffer.as_ref().unwrap(), + usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, + }; + + unsafe { + encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); + encoder.transition_buffers(iter::once(barrier)); + encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions); + } } - pending_writes.consume(Arc::new(staging_buffer)); + pending_writes.consume(&device, Arc::new(staging_buffer)); pending_writes .dst_textures .insert(destination.texture, dst.clone()); diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index c45ac84e1a..61e7b79c55 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -407,7 +407,7 @@ impl Resource for Buffer { /// [`Device::pending_writes`]: crate::device::Device #[derive(Debug)] pub struct StagingBuffer { - pub(crate) raw: Arc, + pub(crate) raw: Mutex>, pub(crate) size: wgt::BufferAddress, pub(crate) is_coherent: bool, pub(crate) info: ResourceInfo, From e1c9b9151c7f55836f9f740edeca23bea191fca1 Mon Sep 17 00:00:00 2001 From: Niklas Korz Date: Fri, 7 Apr 2023 11:36:01 +0200 Subject: [PATCH 009/132] Fix compiler errors for Metal backend --- wgpu-hal/src/metal/mod.rs | 12 +++++++----- 
wgpu-hal/src/metal/surface.rs | 20 +++++++++++--------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index bae68cc306..74b46602b6 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -29,7 +29,7 @@ use std::{ use arrayvec::ArrayVec; use foreign_types::ForeignTypeRef as _; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; #[derive(Clone)] pub struct Api; @@ -300,8 +300,8 @@ pub struct Device { pub struct Surface { view: Option>, render_layer: Mutex, - swapchain_format: Option, - extent: wgt::Extent3d, + swapchain_format: RwLock>, + extent: RwLock, main_thread_id: thread::ThreadId, // Useful for UI-intensive applications that are sensitive to // window resizing. @@ -329,7 +329,7 @@ unsafe impl Sync for SurfaceTexture {} impl crate::Queue for Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&CommandBuffer], signal_fence: Option<(&mut Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -376,7 +376,7 @@ impl crate::Queue for Queue { Ok(()) } unsafe fn present( - &mut self, + &self, _surface: &Surface, texture: SurfaceTexture, ) -> Result<(), crate::SurfaceError> { @@ -654,6 +654,7 @@ impl PipelineStageInfo { } } +#[derive(Debug)] pub struct RenderPipeline { raw: metal::RenderPipelineState, #[allow(dead_code)] @@ -673,6 +674,7 @@ pub struct RenderPipeline { unsafe impl Send for RenderPipeline {} unsafe impl Sync for RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline { raw: metal::ComputePipelineState, #[allow(dead_code)] diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index ebfa79e8eb..896c631792 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -14,7 +14,7 @@ use objc::{ runtime::{Class, Object, Sel, BOOL, NO, YES}, sel, sel_impl, }; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; #[cfg(target_os = "macos")] #[link(name = "QuartzCore", kind = "framework")] @@ -63,8 +63,8 @@ impl super::Surface { Self { view, render_layer: Mutex::new(layer), - swapchain_format: None, - extent: wgt::Extent3d::default(), + swapchain_format: RwLock::new(None), + extent: RwLock::new(wgt::Extent3d::default()), main_thread_id: thread::current().id(), present_with_transaction: false, } @@ -178,8 +178,8 @@ impl crate::Surface for super::Surface { log::info!("build swapchain {:?}", config); let caps = &device.shared.private_caps; - self.swapchain_format = Some(config.format); - self.extent = config.extent; + *self.swapchain_format.write() = Some(config.format); + *self.extent.write() = config.extent; let render_layer = self.render_layer.lock(); let framebuffer_only = config.usage == crate::TextureUses::COLOR_TARGET; @@ -234,7 +234,7 @@ impl crate::Surface for super::Surface { } unsafe fn unconfigure(&self, _device: &super::Device) { - self.swapchain_format = None; + *self.swapchain_format.write() = None; } unsafe fn acquire_texture( @@ -251,16 +251,18 @@ impl crate::Surface for super::Surface { None => return Ok(None), }; + let swapchain_format = self.swapchain_format.read().unwrap(); + let extent = self.extent.read(); let suf_texture = super::SurfaceTexture { texture: super::Texture { raw: texture, - format: self.swapchain_format.unwrap(), + format: swapchain_format, raw_type: metal::MTLTextureType::D2, array_layers: 1, mip_levels: 1, copy_size: crate::CopyExtent { - width: self.extent.width, - height: self.extent.height, + width: extent.width, + height: extent.height, depth: 1, }, }, From 
9077b43563b8cdb6e63693e3cd9a98f0e6ec759c Mon Sep 17 00:00:00 2001 From: Niklas Korz Date: Fri, 7 Apr 2023 12:53:43 +0200 Subject: [PATCH 010/132] Fix usage of active_submission_index.fetch_add From https://doc.rust-lang.org/std/sync/atomic/struct.AtomicU64.html#method.fetch_add: > Adds to the current value, returning the previous value. --- wgpu-core/src/device/global.rs | 3 ++- wgpu-core/src/device/queue.rs | 9 ++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index bdbc3d8753..b8031c4101 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2454,7 +2454,8 @@ impl Global { buffer.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed), + .fetch_add(1, Ordering::Relaxed) + + 1, ); let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { src_offset: 0, diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 1ac4052d7d..347e43cdc0 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -555,7 +555,8 @@ impl Global { dst.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed), + .fetch_add(1, Ordering::Relaxed) + + 1, ); let region = wgt::BufferSize::new(src_buffer_size).map(|size| hal::BufferCopy { @@ -769,7 +770,8 @@ impl Global { dst.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed), + .fetch_add(1, Ordering::Relaxed) + + 1, ); let dst_raw = dst @@ -1086,7 +1088,8 @@ impl Global { let submit_index = device .active_submission_index - .fetch_add(1, Ordering::Relaxed); + .fetch_add(1, Ordering::Relaxed) + + 1; let mut active_executions = Vec::new(); let mut used_surface_textures = track::TextureUsageScope::new(); let mut pending_writes = device.pending_writes.lock(); From f72ff9df6bb76bfecc8e7efc211f221cfa034ca9 Mon Sep 17 00:00:00 2001 From: Niklas Korz Date: Fri, 7 Apr 2023 14:16:24 +0200 Subject: [PATCH 011/132] Fix release build --- wgpu-core/src/device/queue.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 1ac4052d7d..10b30c5303 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -196,7 +196,12 @@ impl PendingWrites { initialization_status: RwLock::new(BufferInitTracker::new(buffer.size)), sync_mapped_writes: Mutex::new(None), map_state: Mutex::new(crate::resource::BufferMapState::Idle), - info: ResourceInfo::new(&buffer.info.label), + info: ResourceInfo::new( + #[cfg(debug_assertions)] + &buffer.info.label, + #[cfg(not(debug_assertions))] + "", + ), }))); } From 0fc93d8a2b6ec21317607ed56f4f2099db399f63 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Apr 2023 18:45:19 +0200 Subject: [PATCH 012/132] Fixing integration issues --- wgpu-core/src/hub.rs | 12 +++++------ wgpu-core/src/instance.rs | 42 ++++++++++++++++++++------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 1dbfb3dece..0e26635022 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -156,7 +156,7 @@ use crate::{ hal_api::HalApi, id, identity::GlobalIdentityHandlerFactory, - instance::{Adapter, Surface}, + instance::{Adapter, HalSurface, Surface}, pipeline::{ComputePipeline, RenderPipeline, ShaderModule}, registry::Registry, resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, @@ -359,14 +359,12 @@ impl Hub { pub(crate) fn 
surface_unconfigure( &self, device_id: id::Valid, - surface: &mut HalSurface, + surface: &HalSurface, ) { - use hal::Surface as _; - - let devices = self.devices.data.read(); - let device = &devices[device_id]; + let device = self.devices.get(device_id.0).unwrap(); unsafe { - surface.raw.unconfigure(&device.raw); + use hal::Surface; + surface.raw.unconfigure(device.raw.as_ref().unwrap()); } } diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 5d0f071558..f303792768 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -635,32 +635,34 @@ impl Global { fn unconfigure( global: &Global, - surface: &mut HalSurface, + surface: &HalSurface, present: &Presentation, ) { let hub = HalApi::hub(global); - hub.surface_unconfigure(present.device_id.value, surface); + hub.surface_unconfigure(present.device_id, surface); } - + let surface = self.surfaces.unregister(id); if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { - if let Some(present) = surface.presentation.lock().take() { - match present.backend() { - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => unconfigure(self, surface.vulkan.as_mut().unwrap(), &present), - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => unconfigure(self, surface.metal.as_mut().unwrap(), &present), - #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => unconfigure(self, surface.dx12.as_mut().unwrap(), &present), - #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => unconfigure(self, surface.dx11.as_mut().unwrap(), &present), - #[cfg(feature = "gles")] - Backend::Gl => unconfigure(self, surface.gl.as_mut().unwrap(), &present), - _ => unreachable!(), - } - } - - self.instance.destroy_surface(surface); + if let Some(present) = surface.presentation.lock().take() { + match present.backend() { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => { + unconfigure(self, surface.vulkan.as_ref().unwrap(), &present) + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + Backend::Metal => unconfigure(self, surface.metal.as_ref().unwrap(), &present), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => unconfigure(self, surface.dx12.as_ref().unwrap(), &present), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => unconfigure(self, surface.dx11.as_ref().unwrap(), &present), + #[cfg(feature = "gles")] + Backend::Gl => unconfigure(self, surface.gl.as_ref().unwrap(), &present), + _ => unreachable!(), + } + } + + self.instance.destroy_surface(surface); } else { panic!("Surface cannot be destroyed because is still in use"); } From 2c09b194b0c37d0988b3f721e122800019fcb29d Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Apr 2023 19:30:49 +0200 Subject: [PATCH 013/132] Fixing doc, fmt and gles --- wgpu-core/src/device/queue.rs | 27 +++-- wgpu-core/src/hub.rs | 46 +-------- wgpu-hal/src/gles/web.rs | 185 +++++++++++++++++++--------------- wgpu-hal/src/lib.rs | 2 +- 4 files changed, 123 insertions(+), 137 deletions(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 5d26bd6b65..725bacc0d0 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -913,6 +913,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); + let texture_guard = hub.textures.read(); let dst = hub.textures.get(destination.texture).unwrap(); if 
!conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { @@ -985,10 +986,11 @@ impl Global { )?; let (selector, dst_base) = - extract_texture_selector(&destination.to_untagged(), &size, dst)?; + extract_texture_selector(&destination.to_untagged(), &size, &dst)?; let mut trackers = device.trackers.lock(); - let encoder = device.pending_writes.lock().activate(); + let mut pending_writes = device.pending_writes.lock(); + let encoder = pending_writes.as_mut().unwrap().activate(); // If the copy does not fully cover the layers, we need to initialize to // zero *first* as we don't keep track of partial texture layer inits. @@ -1001,7 +1003,7 @@ impl Global { } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; - let dst_initialization_status = dst.initialization_status.write(); + let mut dst_initialization_status = dst.initialization_status.write(); if dst_initialization_status.mips[destination.mip_level as usize] .check(init_layer_range.clone()) .is_some() @@ -1021,7 +1023,7 @@ impl Global { encoder, &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .map_err(QueueWriteError::from)?; } @@ -1031,24 +1033,27 @@ impl Global { } } - let dst = texture_guard.get(destination.texture).unwrap(); - let transitions = trackers .textures .set_single( - dst, + &dst, destination.texture, selector, hal::TextureUses::COPY_DST, ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.life_guard - .read() - .use_at(device.active_submission_index + 1); + dst.info.use_at( + device + .active_submission_index + .fetch_add(1, Ordering::Relaxed) + + 1, + ); let dst_raw = dst .inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; @@ -1064,7 +1069,7 @@ impl Global { }; unsafe { - encoder.transition_textures(transitions.map(|pending| pending.into_hal(dst))); + encoder.transition_textures(transitions.map(|pending| pending.into_hal(&dst))); encoder.copy_external_image_to_texture( source, dst_raw, diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 0e26635022..09ef1006e5 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -208,53 +208,13 @@ impl HubReport { /// /// But most `wgpu` operations require access to several different /// kinds of resource, so you often need to hold locks on several -/// different fields of your [`Hub`] simultaneously. To avoid -/// deadlock, there is an ordering imposed on the fields, and you may -/// only acquire new locks on fields that come *after* all those you -/// are already holding locks on, in this ordering. (The ordering is -/// described in the documentation for the [`Access`] trait.) +/// different fields of your [`Hub`] simultaneously. /// -/// We use Rust's type system to statically check that `wgpu_core` can -/// only ever acquire locks in the correct order: +/// Inside the `Registry` there are `Arc` where `T` is a Resource +/// Lock of `Registry` happens only when accessing to get the specific resource /// -/// - A value of type [`Token`] represents proof that the owner -/// only holds locks on the `Hub` fields holding resources of type -/// `T` or earlier in the lock ordering. A special value of type -/// `Token`, obtained by calling [`Token::root`], represents -/// proof that no `Hub` field locks are held. -/// -/// - To lock the `Hub` field holding resources of type `T`, you must -/// call its [`read`] or [`write`] methods. 
These require you to -/// pass in a `&mut Token`, for some `A` that implements -/// [`Access`]. This implementation exists only if `T` follows `A` -/// in the field ordering, which statically ensures that you are -/// indeed allowed to lock this new `Hub` field. -/// -/// - The locking methods return both an [`RwLock`] guard that you can -/// use to access the field's resources, and a new `Token` value. -/// These both borrow from the lifetime of your `Token`, so since -/// you passed that by mutable reference, you cannot access it again -/// until you drop the new token and lock guard. -/// -/// Because a thread only ever has access to the `Token` for the -/// last resource type `T` it holds a lock for, and the `Access` trait -/// implementations only permit acquiring locks for types `U` that -/// follow `T` in the lock ordering, it is statically impossible for a -/// program to violate the locking order. -/// -/// This does assume that threads cannot call `Token` when they -/// already hold locks (dynamically enforced in debug builds) and that -/// threads cannot send their `Token`s to other threads (enforced by -/// making `Token` neither `Send` nor `Sync`). /// /// [`A::hub(global)`]: HalApi::hub -/// [`RwLock`]: parking_lot::RwLock -/// [`buffers`]: Hub::buffers -/// [`read`]: Registry::read -/// [`write`]: Registry::write -/// [`Token`]: Token -/// [`Access`]: Access -/// [#2272]: https://github.com/gfx-rs/wgpu/pull/2272 pub struct Hub { pub adapters: Registry, F>, pub devices: Registry, F>, diff --git a/wgpu-hal/src/gles/web.rs b/wgpu-hal/src/gles/web.rs index d7ecfa6932..9bd99d7255 100644 --- a/wgpu-hal/src/gles/web.rs +++ b/wgpu-hal/src/gles/web.rs @@ -1,5 +1,5 @@ use glow::HasContext; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; use wasm_bindgen::JsCast; use super::TextureFormatDesc; @@ -85,9 +85,9 @@ impl Instance { Ok(Surface { webgl2_context, - srgb_present_program: None, + srgb_present_program: Mutex::new(None), swapchain: RwLock::new(None), - texture: None, + texture: Mutex::new(None), presentable: true, }) } @@ -159,13 +159,25 @@ impl crate::Instance for Instance { } } -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct Surface { webgl2_context: web_sys::WebGl2RenderingContext, pub(super) swapchain: RwLock>, - texture: Option, + texture: Mutex>, pub(super) presentable: bool, - srgb_present_program: Option, + srgb_present_program: Mutex>, +} + +impl Clone for Surface { + fn clone(&self) -> Self { + Self { + webgl2_context: self.webgl2_context.clone(), + swapchain: RwLock::new(self.swapchain.read().clone()), + texture: Mutex::new(*self.texture.lock()), + presentable: self.presentable, + srgb_present_program: Mutex::new(*self.srgb_present_program.lock()), + } + } } // SAFE: Because web doesn't have threads ( yet ) @@ -187,13 +199,10 @@ impl Surface { _suf_texture: super::Texture, gl: &glow::Context, ) -> Result<(), crate::SurfaceError> { - let swapchain = self - .swapchain - .read() - .as_ref() - .ok_or(crate::SurfaceError::Other( - "need to configure surface before presenting", - ))?; + let swapchain = self.swapchain.read(); + let swapchain = swapchain.as_ref().ok_or(crate::SurfaceError::Other( + "need to configure surface before presenting", + ))?; if swapchain.format.is_srgb() { // Important to set the viewport since we don't know in what state the user left it. 
@@ -208,8 +217,8 @@ impl Surface { unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; unsafe { gl.bind_sampler(0, None) }; unsafe { gl.active_texture(glow::TEXTURE0) }; - unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; - unsafe { gl.use_program(self.srgb_present_program) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, *self.texture.lock()) }; + unsafe { gl.use_program(*self.srgb_present_program.lock()) }; unsafe { gl.disable(glow::DEPTH_TEST) }; unsafe { gl.disable(glow::STENCIL_TEST) }; unsafe { gl.disable(glow::SCISSOR_TEST) }; @@ -276,83 +285,94 @@ impl crate::Surface for Surface { ) -> Result<(), crate::SurfaceError> { let gl = &device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.write().take() { - // delete all frame buffers already allocated - unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + { + let mut swapchain = self.swapchain.write(); + if let Some(swapchain) = swapchain.take() { + // delete all frame buffers already allocated + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + } } - - if self.srgb_present_program.is_none() && config.format.is_srgb() { - self.srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); + { + let mut srgb_present_program = self.srgb_present_program.lock(); + if srgb_present_program.is_none() && config.format.is_srgb() { + *srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); + } } + { + let mut texture = self.texture.lock(); + if let Some(texture) = texture.take() { + unsafe { gl.delete_texture(texture) }; + } - if let Some(texture) = self.texture.take() { - unsafe { gl.delete_texture(texture) }; - } + *texture = Some(unsafe { gl.create_texture() }.map_err(|error| { + log::error!("Internal swapchain texture creation failed: {error}"); + crate::DeviceError::OutOfMemory + })?); - self.texture = Some(unsafe { gl.create_texture() }.map_err(|error| { - log::error!("Internal swapchain texture creation failed: {error}"); - crate::DeviceError::OutOfMemory - })?); - - let desc = device.shared.describe_texture_format(config.format); - unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; - unsafe { - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MIN_FILTER, - glow::NEAREST as _, - ) - }; - unsafe { - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MAG_FILTER, - glow::NEAREST as _, - ) - }; - unsafe { - gl.tex_storage_2d( - glow::TEXTURE_2D, - 1, - desc.internal, - config.extent.width as i32, - config.extent.height as i32, - ) - }; + let desc = device.shared.describe_texture_format(config.format); + unsafe { gl.bind_texture(glow::TEXTURE_2D, *texture) }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MIN_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MAG_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_storage_2d( + glow::TEXTURE_2D, + 1, + desc.internal, + config.extent.width as i32, + config.extent.height as i32, + ) + }; - let framebuffer = unsafe { gl.create_framebuffer() }.map_err(|error| { - log::error!("Internal swapchain framebuffer creation failed: {error}"); - crate::DeviceError::OutOfMemory - })?; - unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; - unsafe { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::TEXTURE_2D, - self.texture, - 0, - ) - }; - unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; + let framebuffer = unsafe { gl.create_framebuffer() 
}.map_err(|error| { + log::error!("Internal swapchain framebuffer creation failed: {error}"); + crate::DeviceError::OutOfMemory + })?; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::TEXTURE_2D, + *texture, + 0, + ) + }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; + + let mut swapchain = self.swapchain.write(); + *swapchain = Some(Swapchain { + extent: config.extent, + // channel: config.format.base_format().1, + format: config.format, + format_desc: desc, + framebuffer, + }); + } - let mut swapchain = self.swapchain.write(); - *swapchain = Some(Swapchain { - extent: config.extent, - // channel: config.format.base_format().1, - format: config.format, - format_desc: desc, - framebuffer, - }); Ok(()) } unsafe fn unconfigure(&self, device: &super::Device) { let gl = device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.write().take() { - unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + { + let mut swapchain = self.swapchain.write(); + if let Some(swapchain) = swapchain.take() { + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + } } - if let Some(renderbuffer) = self.texture.take() { + if let Some(renderbuffer) = self.texture.lock().take() { unsafe { gl.delete_texture(renderbuffer) }; } } @@ -361,10 +381,11 @@ impl crate::Surface for Surface { &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.read().as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); let texture = super::Texture { inner: super::TextureInner::Texture { - raw: self.texture.unwrap(), + raw: self.texture.lock().unwrap(), target: glow::TEXTURE_2D, }, drop_guard: None, diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index 2004fc56a3..113c24dbb2 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -87,7 +87,7 @@ pub mod api { use std::{ borrow::{Borrow, Cow}, fmt, - num::{NonZeroU32}, + num::NonZeroU32, ops::{Range, RangeInclusive}, ptr::NonNull, sync::atomic::AtomicBool, From fd92db5ef4e22bac0f175285b1be328e2f1e9246 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Apr 2023 19:44:45 +0200 Subject: [PATCH 014/132] Missing doc changes --- wgpu-core/src/hub.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 09ef1006e5..7cfce75cad 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -191,7 +191,7 @@ impl HubReport { } #[allow(rustdoc::private_intra_doc_links)] -/// All the resources for a particular backend in a [`Global`]. +/// All the resources for a particular backend in a [`crate::global::Global`]. /// /// To obtain `global`'s `Hub` for some [`HalApi`] backend type `A`, /// call [`A::hub(global)`]. @@ -199,9 +199,9 @@ impl HubReport { /// ## Locking /// /// Each field in `Hub` is a [`Registry`] holding all the values of a -/// particular type of resource, all protected by a single [`RwLock`]. +/// particular type of resource, all protected by a single RwLock. /// So for example, to access any [`Buffer`], you must acquire a read -/// lock on the `Hub`s entire [`buffers`] registry. The lock guard +/// lock on the `Hub`s entire buffers registry. The lock guard /// gives you access to the `Registry`'s [`Storage`], which you can /// then index with the buffer's id. (Yes, this design causes /// contention; see [#2272].) 
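
A simplified stand-in (not wgpu-core's actual `Registry`/`Storage` types, which also carry epochs, error variants, and identity handling) for the locking shape the hub.rs docs above describe after Arcanization: one RwLock over a map of Arc-wrapped resources, where callers hold the read lock only long enough to clone the Arc out and then use the resource with no registry lock held.

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Hypothetical illustration type; the name and API are not wgpu-core's.
struct ArcRegistry<T> {
    map: RwLock<HashMap<u64, Arc<T>>>,
}

impl<T> ArcRegistry<T> {
    fn new() -> Self {
        Self {
            map: RwLock::new(HashMap::new()),
        }
    }

    fn insert(&self, id: u64, value: T) -> Arc<T> {
        let arc = Arc::new(value);
        self.map.write().unwrap().insert(id, arc.clone());
        arc
    }

    // The read lock lives only for the duration of the lookup; the caller gets an
    // owned Arc and holds no registry lock while working with the resource.
    fn get(&self, id: u64) -> Option<Arc<T>> {
        self.map.read().unwrap().get(&id).cloned()
    }
}

fn main() {
    let buffers = ArcRegistry::new();
    buffers.insert(1, vec![0u8; 64]);
    if let Some(buffer) = buffers.get(1) {
        // No lock on `buffers` is held here; the Arc keeps the resource alive even
        // if another thread removes id 1 from the registry in the meantime.
        println!("buffer len = {}", buffer.len());
    }
}
```
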
From b6a5d2a7fde37b0d8e4dd5d2e65b0c689838a08f Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Apr 2023 19:53:15 +0200 Subject: [PATCH 015/132] Added _ due to non-exauhstive pattern addition --- deno_webgpu/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/deno_webgpu/error.rs b/deno_webgpu/error.rs index 17ff7c1ef4..8defdccc39 100644 --- a/deno_webgpu/error.rs +++ b/deno_webgpu/error.rs @@ -105,6 +105,7 @@ impl From for WebGpuError { DeviceError::Lost => WebGpuError::Lost, DeviceError::OutOfMemory => WebGpuError::OutOfMemory, DeviceError::Invalid => WebGpuError::Validation(fmt_err(&err)), + _ => WebGpuError::Validation(fmt_err(&err)), } } } From ea3120e933175e51724ae4b9db95232e0ca299b4 Mon Sep 17 00:00:00 2001 From: grovesNL Date: Sat, 8 Apr 2023 00:44:00 -0230 Subject: [PATCH 016/132] Avoid using `WasmAbi` functions on WebGPU backend --- wgpu/src/backend/direct.rs | 12 +- wgpu/src/backend/web.rs | 980 +++++++++++++++++++------------------ wgpu/src/context.rs | 31 +- wgpu/src/lib.rs | 14 +- 4 files changed, 539 insertions(+), 498 deletions(-) diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 028bdaebe9..d79c11f8f0 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -2235,13 +2235,15 @@ impl crate::Context for Context { } } - fn queue_submit>( + fn queue_submit>( &self, queue: &Self::QueueId, _queue_data: &Self::QueueData, command_buffers: I, ) -> (Self::SubmissionIndex, Self::SubmissionIndexData) { - let temp_command_buffers = command_buffers.collect::>(); + let temp_command_buffers = command_buffers + .map(|(i, _)| i) + .collect::>(); let global = &self.0; let index = match wgc::gfx_select!(*queue => global.queue_submit(*queue, &temp_command_buffers)) @@ -2924,9 +2926,11 @@ impl crate::Context for Context { &self, _pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: Box + 'a>, + render_bundles: Box< + dyn Iterator + 'a, + >, ) { - let temp_render_bundles = render_bundles.collect::>(); + let temp_render_bundles = render_bundles.map(|(i, _)| i).collect::>(); unsafe { wgpu_render_pass_execute_bundles( pass_data, diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index f6f465c46d..ed2152414e 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -6,30 +6,27 @@ use std::{ cell::RefCell, fmt, future::Future, + marker::PhantomData, ops::Range, pin::Pin, rc::Rc, task::{self, Poll}, }; -use wasm_bindgen::{ - convert::{FromWasmAbi, IntoWasmAbi}, - prelude::*, - JsCast, -}; +use wasm_bindgen::{prelude::*, JsCast}; use crate::{ - context::{ObjectId, QueueWriteBuffer, Unused}, + context::{downcast_ref, ObjectId, QueueWriteBuffer, Unused}, UncapturedErrorHandler, }; -fn create_identified(value: T) -> Identified { +fn create_identified(value: T) -> (Identified, Sendable) { cfg_if::cfg_if! { if #[cfg(feature = "expose-ids")] { static NEXT_ID: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1); let id = NEXT_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - Identified(value, core::num::NonZeroU64::new(id).unwrap()) + (Identified(core::num::NonZeroU64::new(id).unwrap(), PhantomData), Sendable(value)) } else { - Identified(value) + (Identified(PhantomData), Sendable(value)) } } } @@ -42,42 +39,53 @@ fn create_identified(value: T) -> Identified { // type is (for now) harmless. Eventually wasm32 will support threading, and depending on how this // is integrated (or not integrated) with values like those in webgpu, this may become unsound. 
-impl + JsCast> From for Identified { +#[allow(unused_variables)] +impl From for Identified { fn from(object_id: ObjectId) -> Self { - let id = object_id.id().get() as u32; - // SAFETY: wasm_bindgen says an ABI representation may only be cast to a wrapper type if it was created - // using into_abi. - // - // This assumption we sadly have to assume to prevent littering the code with unsafe blocks. - let wasm = unsafe { JsValue::from_abi(id) }; - wgt::strict_assert!(wasm.is_instance_of::()); - // SAFETY: The ABI of the type must be a u32, and strict asserts ensure the right type is used. Self( - wasm.unchecked_into(), #[cfg(feature = "expose-ids")] object_id.global_id(), + PhantomData, ) } } -impl> From> for ObjectId { +#[allow(unused_variables)] +impl From> for ObjectId { fn from(identified: Identified) -> Self { - let id = core::num::NonZeroU64::new(identified.0.into_abi() as u64).unwrap(); Self::new( - id, + // TODO: the ID isn't used, so we hardcode it to 1 for now until we rework this + // API. + core::num::NonZeroU64::new(1).unwrap(), #[cfg(feature = "expose-ids")] identified.1, ) } } +#[allow(unused_variables)] +impl From<(Identified, Sendable)> for ObjectId { + fn from((id, _data): (Identified, Sendable)) -> Self { + Self::new( + // TODO: the ID isn't used, so we hardcode it to 1 for now until we rework this + // API. + core::num::NonZeroU64::new(1).unwrap(), + #[cfg(feature = "expose-ids")] + id.1, + ) + } +} + #[derive(Clone, Debug)] pub(crate) struct Sendable(T); unsafe impl Send for Sendable {} unsafe impl Sync for Sendable {} #[derive(Clone, Debug)] -pub(crate) struct Identified(T, #[cfg(feature = "expose-ids")] std::num::NonZeroU64); +pub(crate) struct Identified( + #[cfg(feature = "expose-ids")] std::num::NonZeroU64, + PhantomData, +); unsafe impl Send for Identified {} unsafe impl Sync for Identified {} @@ -457,8 +465,8 @@ fn map_texture_view_dimension( } fn map_buffer_copy_view(view: crate::ImageCopyBuffer) -> web_sys::GpuImageCopyBuffer { - let buffer = &<::BufferId>::from(view.buffer.id).0; - let mut mapped = web_sys::GpuImageCopyBuffer::new(buffer); + let buffer: &::BufferData = downcast_ref(view.buffer.data.as_ref()); + let mut mapped = web_sys::GpuImageCopyBuffer::new(&buffer.0); if let Some(bytes_per_row) = view.layout.bytes_per_row { mapped.bytes_per_row(bytes_per_row); } @@ -470,8 +478,9 @@ fn map_buffer_copy_view(view: crate::ImageCopyBuffer) -> web_sys::GpuImageCopyBu } fn map_texture_copy_view(view: crate::ImageCopyTexture) -> web_sys::GpuImageCopyTexture { - let texture = &<::TextureId>::from(view.texture.id).0; - let mut mapped = web_sys::GpuImageCopyTexture::new(texture); + let texture: &::TextureData = + downcast_ref(view.texture.data.as_ref()); + let mut mapped = web_sys::GpuImageCopyTexture::new(&texture.0); mapped.mip_level(view.mip_level); mapped.origin(&map_origin_3d(view.origin)); mapped @@ -480,8 +489,9 @@ fn map_texture_copy_view(view: crate::ImageCopyTexture) -> web_sys::GpuImageCopy fn map_tagged_texture_copy_view( view: crate::ImageCopyTextureTagged, ) -> web_sys::GpuImageCopyTextureTagged { - let texture = &<::TextureId>::from(view.texture.id).0; - let mut mapped = web_sys::GpuImageCopyTextureTagged::new(texture); + let texture: &::TextureData = + downcast_ref(view.texture.data.as_ref()); + let mut mapped = web_sys::GpuImageCopyTextureTagged::new(&texture.0); mapped.mip_level(view.mip_level); mapped.origin(&map_origin_3d(view.origin)); mapped.aspect(map_texture_aspect(view.aspect)); @@ -598,9 +608,14 @@ fn map_wgt_features(supported_features: 
web_sys::GpuSupportedFeatures) -> wgt::F type JsFutureResult = Result; -fn future_request_adapter(result: JsFutureResult) -> Option<(Identified, ())> { +fn future_request_adapter( + result: JsFutureResult, +) -> Option<( + Identified, + Sendable, +)> { match result.and_then(wasm_bindgen::JsCast::dyn_into) { - Ok(adapter) => Some((create_identified(adapter), ())), + Ok(adapter) => Some(create_identified(adapter)), Err(_) => None, } } @@ -610,23 +625,18 @@ fn future_request_device( ) -> Result< ( Identified, - (), + Sendable, Identified, - (), + Sendable, ), crate::RequestDeviceError, > { result .map(|js_value| { - let device_id = web_sys::GpuDevice::from(js_value); - let queue_id = device_id.queue(); + let (device_id, device_data) = create_identified(web_sys::GpuDevice::from(js_value)); + let (queue_id, queue_data) = create_identified(device_data.0.queue()); - ( - create_identified(device_id), - (), - create_identified(queue_id), - (), - ) + (device_id, device_data, queue_id, queue_data) }) .map_err(|_| crate::RequestDeviceError) } @@ -687,14 +697,26 @@ impl Context { pub fn instance_create_surface_from_canvas( &self, canvas: &web_sys::HtmlCanvasElement, - ) -> Result<::SurfaceId, crate::CreateSurfaceError> { + ) -> Result< + ( + ::SurfaceId, + ::SurfaceData, + ), + crate::CreateSurfaceError, + > { self.create_surface_from_context(canvas.get_context("webgpu")) } pub fn instance_create_surface_from_offscreen_canvas( &self, canvas: &web_sys::OffscreenCanvas, - ) -> Result<::SurfaceId, crate::CreateSurfaceError> { + ) -> Result< + ( + ::SurfaceId, + ::SurfaceData, + ), + crate::CreateSurfaceError, + > { self.create_surface_from_context(canvas.get_context("webgpu")) } @@ -705,7 +727,13 @@ impl Context { fn create_surface_from_context( &self, context_result: Result, wasm_bindgen::JsValue>, - ) -> Result<::SurfaceId, crate::CreateSurfaceError> { + ) -> Result< + ( + ::SurfaceId, + ::SurfaceData, + ), + crate::CreateSurfaceError, + > { let context: js_sys::Object = match context_result { Ok(Some(context)) => context, Ok(None) => { @@ -752,47 +780,47 @@ extern "C" { impl crate::context::Context for Context { type AdapterId = Identified; - type AdapterData = (); + type AdapterData = Sendable; type DeviceId = Identified; - type DeviceData = (); + type DeviceData = Sendable; type QueueId = Identified; - type QueueData = (); + type QueueData = Sendable; type ShaderModuleId = Identified; - type ShaderModuleData = (); + type ShaderModuleData = Sendable; type BindGroupLayoutId = Identified; - type BindGroupLayoutData = (); + type BindGroupLayoutData = Sendable; type BindGroupId = Identified; - type BindGroupData = (); + type BindGroupData = Sendable; type TextureViewId = Identified; - type TextureViewData = (); + type TextureViewData = Sendable; type SamplerId = Identified; - type SamplerData = (); + type SamplerData = Sendable; type BufferId = Identified; - type BufferData = (); + type BufferData = Sendable; type TextureId = Identified; - type TextureData = (); + type TextureData = Sendable; type QuerySetId = Identified; - type QuerySetData = (); + type QuerySetData = Sendable; type PipelineLayoutId = Identified; - type PipelineLayoutData = (); + type PipelineLayoutData = Sendable; type RenderPipelineId = Identified; - type RenderPipelineData = (); + type RenderPipelineData = Sendable; type ComputePipelineId = Identified; - type ComputePipelineData = (); + type ComputePipelineData = Sendable; type CommandEncoderId = Identified; - type CommandEncoderData = (); + type CommandEncoderData = Sendable; type 
ComputePassId = Identified; - type ComputePassData = (); + type ComputePassData = Sendable; type RenderPassId = Identified; - type RenderPassData = (); + type RenderPassData = Sendable; type CommandBufferId = Identified; - type CommandBufferData = (); + type CommandBufferData = Sendable; type RenderBundleEncoderId = Identified; - type RenderBundleEncoderData = (); + type RenderBundleEncoderData = Sendable; type RenderBundleId = Identified; - type RenderBundleData = (); + type RenderBundleData = Sendable; type SurfaceId = Identified; - type SurfaceData = (); + type SurfaceData = Sendable; type SurfaceOutputDetail = SurfaceOutputDetail; type SubmissionIndex = Unused; @@ -855,10 +883,7 @@ impl crate::context::Context for Context { .expect("expected to find single canvas") .into(); let canvas_element: web_sys::HtmlCanvasElement = canvas_node.into(); - Ok(( - self.instance_create_surface_from_canvas(&canvas_element)?, - (), - )) + Ok(self.instance_create_surface_from_canvas(&canvas_element)?) } fn instance_request_adapter( @@ -885,8 +910,8 @@ impl crate::context::Context for Context { fn adapter_request_device( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + _adapter: &Self::AdapterId, + adapter_data: &Self::AdapterData, desc: &crate::DeviceDescriptor, trace_dir: Option<&std::path::Path>, ) -> Self::RequestDeviceFuture { @@ -914,7 +939,7 @@ impl crate::context::Context for Context { mapped_desc.label(label); } - let device_promise = adapter.0.request_device_with_descriptor(&mapped_desc); + let device_promise = adapter_data.0.request_device_with_descriptor(&mapped_desc); MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(device_promise), @@ -939,18 +964,18 @@ impl crate::context::Context for Context { fn adapter_features( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + _adapter: &Self::AdapterId, + adapter_data: &Self::AdapterData, ) -> wgt::Features { - map_wgt_features(adapter.0.features()) + map_wgt_features(adapter_data.0.features()) } fn adapter_limits( &self, - adapter: &Self::AdapterId, - _adapter_data: &Self::AdapterData, + _adapter: &Self::AdapterId, + adapter_data: &Self::AdapterData, ) -> wgt::Limits { - let limits = adapter.0.limits(); + let limits = adapter_data.0.limits(); wgt::Limits { max_texture_dimension_1d: limits.max_texture_dimension_1d(), max_texture_dimension_2d: limits.max_texture_dimension_2d(), @@ -1022,9 +1047,9 @@ impl crate::context::Context for Context { fn surface_get_capabilities( &self, _surface: &Self::SurfaceId, - _adapter_data: &Self::AdapterData, - _adapter: &Self::AdapterId, _surface_data: &Self::SurfaceData, + _adapter: &Self::AdapterId, + _adapter_data: &Self::AdapterData, ) -> wgt::SurfaceCapabilities { wgt::SurfaceCapabilities { // https://gpuweb.github.io/gpuweb/#supported-context-formats @@ -1041,10 +1066,10 @@ impl crate::context::Context for Context { fn surface_configure( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _surface: &Self::SurfaceId, + surface_data: &Self::SurfaceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, config: &crate::SurfaceConfiguration, ) { if let wgt::PresentMode::Mailbox | wgt::PresentMode::Immediate = config.present_mode { @@ -1060,7 +1085,7 @@ impl crate::context::Context for Context { _ => web_sys::GpuCanvasAlphaMode::Opaque, }; let mut mapped = - web_sys::GpuCanvasConfiguration::new(&device.0, map_texture_format(config.format)); + 
web_sys::GpuCanvasConfiguration::new(&device_data.0, map_texture_format(config.format)); mapped.usage(config.usage.bits()); mapped.alpha_mode(alpha_mode); let mapped_view_formats = config @@ -1069,22 +1094,23 @@ impl crate::context::Context for Context { .map(|format| JsValue::from(map_texture_format(*format))) .collect::(); mapped.view_formats(&mapped_view_formats); - surface.0.configure(&mapped); + surface_data.0.configure(&mapped); } fn surface_get_current_texture( &self, - surface: &Self::SurfaceId, - _surface_data: &Self::SurfaceData, + _surface: &Self::SurfaceId, + surface_data: &Self::SurfaceData, ) -> ( Option, Option, wgt::SurfaceStatus, Self::SurfaceOutputDetail, ) { + let (surface_id, surface_data) = create_identified(surface_data.0.get_current_texture()); ( - Some(create_identified(surface.0.get_current_texture())), - Some(()), + Some(surface_id), + Some(surface_data), wgt::SurfaceStatus::Good, (), ) @@ -1104,10 +1130,10 @@ impl crate::context::Context for Context { fn device_features( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, ) -> wgt::Features { - map_wgt_features(device.0.features()) + map_wgt_features(device_data.0.features()) } fn device_limits( @@ -1139,8 +1165,8 @@ impl crate::context::Context for Context { )] fn device_create_shader_module( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: crate::ShaderModuleDescriptor, _shader_bound_checks: wgt::ShaderBoundChecks, ) -> (Self::ShaderModuleId, Self::ShaderModuleData) { @@ -1220,10 +1246,7 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { descriptor.label(label); } - ( - create_identified(device.0.create_shader_module(&descriptor)), - (), - ) + create_identified(device_data.0.create_shader_module(&descriptor)) } unsafe fn device_create_shader_module_spirv( @@ -1237,8 +1260,8 @@ impl crate::context::Context for Context { fn device_create_bind_group_layout( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::BindGroupLayoutDescriptor, ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { let mapped_bindings = desc @@ -1331,16 +1354,13 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_bind_group_layout(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_bind_group_layout(&mapped_desc)) } fn device_create_bind_group( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::BindGroupDescriptor, ) -> (Self::BindGroupId, Self::BindGroupData) { let mapped_entries = desc @@ -1353,8 +1373,9 @@ impl crate::context::Context for Context { offset, size, }) => { - let buffer = &<::BufferId>::from(buffer.id).0; - let mut mapped_buffer_binding = web_sys::GpuBufferBinding::new(buffer); + let buffer: &::BufferData = + downcast_ref(buffer.data.as_ref()); + let mut mapped_buffer_binding = web_sys::GpuBufferBinding::new(&buffer.0); mapped_buffer_binding.offset(offset as f64); if let Some(s) = size { mapped_buffer_binding.size(s.get() as f64); @@ -1365,13 +1386,17 @@ impl crate::context::Context for Context { panic!("Web backend does not support arrays of buffers") } crate::BindingResource::Sampler(sampler) => { - 
JsValue::from(Self::SamplerId::from(sampler.id).0) + let sampler: &::SamplerData = + downcast_ref(sampler.data.as_ref()); + JsValue::from(&sampler.0) } crate::BindingResource::SamplerArray(..) => { panic!("Web backend does not support arrays of samplers") } crate::BindingResource::TextureView(texture_view) => { - JsValue::from(Self::TextureViewId::from(texture_view.id).0) + let texture_view: &::TextureViewData = + downcast_ref(texture_view.data.as_ref()); + JsValue::from(&texture_view.0) } crate::BindingResource::TextureViewArray(..) => { panic!("Web backend does not support BINDING_INDEXING extension") @@ -1382,46 +1407,47 @@ impl crate::context::Context for Context { }) .collect::(); - let bgl = &<::BindGroupLayoutId>::from(desc.layout.id).0; - let mut mapped_desc = web_sys::GpuBindGroupDescriptor::new(&mapped_entries, bgl); + let bgl: &::BindGroupLayoutData = + downcast_ref(desc.layout.data.as_ref()); + let mut mapped_desc = web_sys::GpuBindGroupDescriptor::new(&mapped_entries, &bgl.0); if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_bind_group(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_bind_group(&mapped_desc)) } fn device_create_pipeline_layout( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::PipelineLayoutDescriptor, ) -> (Self::PipelineLayoutId, Self::PipelineLayoutData) { let temp_layouts = desc .bind_group_layouts .iter() - .map(|bgl| Self::BindGroupLayoutId::from(bgl.id).0) + .map(|bgl| { + let bgl: &::BindGroupLayoutData = + downcast_ref(bgl.data.as_ref()); + &bgl.0 + }) .collect::(); let mut mapped_desc = web_sys::GpuPipelineLayoutDescriptor::new(&temp_layouts); if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_pipeline_layout(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_pipeline_layout(&mapped_desc)) } fn device_create_render_pipeline( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::RenderPipelineDescriptor, ) -> (Self::RenderPipelineId, Self::RenderPipelineData) { - let module = &<::ShaderModuleId>::from(desc.vertex.module.id).0; - let mut mapped_vertex_state = web_sys::GpuVertexState::new(desc.vertex.entry_point, module); + let module: &::ShaderModuleData = + downcast_ref(desc.vertex.module.data.as_ref()); + let mut mapped_vertex_state = + web_sys::GpuVertexState::new(desc.vertex.entry_point, &module.0); let buffers = desc .vertex @@ -1454,7 +1480,11 @@ impl crate::context::Context for Context { let auto_layout = wasm_bindgen::JsValue::from(web_sys::GpuAutoLayoutMode::Auto); let mut mapped_desc = web_sys::GpuRenderPipelineDescriptor::new( &match desc.layout { - Some(layout) => JsValue::from(Self::PipelineLayoutId::from(layout.id).0), + Some(layout) => { + let layout: &::PipelineLayoutData = + downcast_ref(layout.data.as_ref()); + JsValue::from(&layout.0) + } None => auto_layout, }, &mapped_vertex_state, @@ -1489,9 +1519,10 @@ impl crate::context::Context for Context { None => wasm_bindgen::JsValue::null(), }) .collect::(); - let module = &<::ShaderModuleId>::from(frag.module.id).0; + let module: &::ShaderModuleData = + downcast_ref(frag.module.data.as_ref()); let mapped_fragment_desc = - web_sys::GpuFragmentState::new(frag.entry_point, module, &targets); + web_sys::GpuFragmentState::new(frag.entry_point, &module.0, &targets); 
mapped_desc.fragment(&mapped_fragment_desc); } @@ -1504,27 +1535,26 @@ impl crate::context::Context for Context { let mapped_primitive = map_primitive_state(&desc.primitive); mapped_desc.primitive(&mapped_primitive); - ( - create_identified(device.0.create_render_pipeline(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_render_pipeline(&mapped_desc)) } fn device_create_compute_pipeline( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::ComputePipelineDescriptor, ) -> (Self::ComputePipelineId, Self::ComputePipelineData) { - let shader_module = &<::ShaderModuleId>::from(desc.module.id).0; + let shader_module: &::ShaderModuleData = + downcast_ref(desc.module.data.as_ref()); let mapped_compute_stage = - web_sys::GpuProgrammableStage::new(desc.entry_point, shader_module); + web_sys::GpuProgrammableStage::new(desc.entry_point, &shader_module.0); let auto_layout = wasm_bindgen::JsValue::from(web_sys::GpuAutoLayoutMode::Auto); let mut mapped_desc = web_sys::GpuComputePipelineDescriptor::new( &match desc.layout { Some(layout) => { - let layout = Self::PipelineLayoutId::from(layout.id); - JsValue::from(layout.0) + let layout: &::PipelineLayoutData = + downcast_ref(layout.data.as_ref()); + JsValue::from(&layout.0) } None => auto_layout, }, @@ -1533,16 +1563,13 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_compute_pipeline(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_compute_pipeline(&mapped_desc)) } fn device_create_buffer( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::BufferDescriptor, ) -> (Self::BufferId, Self::BufferData) { let mut mapped_desc = @@ -1551,13 +1578,13 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - (create_identified(device.0.create_buffer(&mapped_desc)), ()) + create_identified(device_data.0.create_buffer(&mapped_desc)) } fn device_create_texture( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::TextureDescriptor, ) -> (Self::TextureId, Self::TextureData) { let mut mapped_desc = web_sys::GpuTextureDescriptor::new( @@ -1577,13 +1604,13 @@ impl crate::context::Context for Context { .map(|format| JsValue::from(map_texture_format(*format))) .collect::(); mapped_desc.view_formats(&mapped_view_formats); - (create_identified(device.0.create_texture(&mapped_desc)), ()) + create_identified(device_data.0.create_texture(&mapped_desc)) } fn device_create_sampler( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::SamplerDescriptor, ) -> (Self::SamplerId, Self::SamplerData) { let mut mapped_desc = web_sys::GpuSamplerDescriptor::new(); @@ -1603,16 +1630,13 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_sampler_with_descriptor(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_sampler_with_descriptor(&mapped_desc)) } fn device_create_query_set( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &wgt::QuerySetDescriptor, 
) -> (Self::QuerySetId, Self::QuerySetData) { let ty = match desc.ty { @@ -1624,36 +1648,30 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(device.0.create_query_set(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_query_set(&mapped_desc)) } fn device_create_command_encoder( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::CommandEncoderDescriptor, ) -> (Self::CommandEncoderId, Self::CommandEncoderData) { let mut mapped_desc = web_sys::GpuCommandEncoderDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified( - device - .0 - .create_command_encoder_with_descriptor(&mapped_desc), - ), - (), + create_identified( + device_data + .0 + .create_command_encoder_with_descriptor(&mapped_desc), ) } fn device_create_render_bundle_encoder( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, desc: &crate::RenderBundleEncoderDescriptor, ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData) { let mapped_color_formats = desc @@ -1672,10 +1690,7 @@ impl crate::context::Context for Context { mapped_desc.depth_stencil_format(map_texture_format(ds.format)); } mapped_desc.sample_count(desc.sample_count); - ( - create_identified(device.0.create_render_bundle_encoder(&mapped_desc)), - (), - ) + create_identified(device_data.0.create_render_bundle_encoder(&mapped_desc)) } fn device_drop(&self, _device: &Self::DeviceId, _device_data: &Self::DeviceData) { @@ -1694,15 +1709,15 @@ impl crate::context::Context for Context { fn device_on_uncaptured_error( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, handler: Box, ) { let f = Closure::wrap(Box::new(move |event: web_sys::GpuUncapturedErrorEvent| { let error = crate::Error::from_js(event.error().value_of()); handler(error); }) as Box); - device + device_data .0 .set_onuncapturederror(Some(f.as_ref().unchecked_ref())); // TODO: This will leak the memory associated with the error handler by default. 
@@ -1711,11 +1726,11 @@ impl crate::context::Context for Context { fn device_push_error_scope( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, filter: crate::ErrorFilter, ) { - device.0.push_error_scope(match filter { + device_data.0.push_error_scope(match filter { crate::ErrorFilter::OutOfMemory => web_sys::GpuErrorFilter::OutOfMemory, crate::ErrorFilter::Validation => web_sys::GpuErrorFilter::Validation, }); @@ -1723,10 +1738,10 @@ impl crate::context::Context for Context { fn device_pop_error_scope( &self, - device: &Self::DeviceId, - _device_data: &Self::DeviceData, + _device: &Self::DeviceId, + device_data: &Self::DeviceData, ) -> Self::PopErrorScopeFuture { - let error_promise = device.0.pop_error_scope(); + let error_promise = device_data.0.pop_error_scope(); MakeSendFuture::new( wasm_bindgen_futures::JsFuture::from(error_promise), future_pop_error_scope, @@ -1735,13 +1750,13 @@ impl crate::context::Context for Context { fn buffer_map_async( &self, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, mode: crate::MapMode, range: Range, callback: Box) + Send + 'static>, ) { - let map_promise = buffer.0.map_async_with_f64_and_f64( + let map_promise = buffer_data.0.map_async_with_f64_and_f64( map_map_mode(mode), range.start as f64, (range.end - range.start) as f64, @@ -1752,11 +1767,11 @@ impl crate::context::Context for Context { fn buffer_get_mapped_range( &self, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, sub_range: Range, ) -> Box { - let array_buffer = buffer.0.get_mapped_range_with_f64_and_f64( + let array_buffer = buffer_data.0.get_mapped_range_with_f64_and_f64( sub_range.start as f64, (sub_range.end - sub_range.start) as f64, ); @@ -1768,14 +1783,14 @@ impl crate::context::Context for Context { }) } - fn buffer_unmap(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { - buffer.0.unmap(); + fn buffer_unmap(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + buffer_data.0.unmap(); } fn texture_create_view( &self, - texture: &Self::TextureId, - _texture_data: &Self::TextureData, + _texture: &Self::TextureId, + texture_data: &Self::TextureData, desc: &crate::TextureViewDescriptor, ) -> (Self::TextureViewId, Self::TextureViewData) { let mut mapped = web_sys::GpuTextureViewDescriptor::new(); @@ -1797,10 +1812,7 @@ impl crate::context::Context for Context { if let Some(label) = desc.label { mapped.label(label); } - ( - create_identified(texture.0.create_view_with_descriptor(&mapped)), - (), - ) + create_identified(texture_data.0.create_view_with_descriptor(&mapped)) } fn surface_drop(&self, _surface: &Self::SurfaceId, _surface_data: &Self::SurfaceData) { @@ -1811,16 +1823,16 @@ impl crate::context::Context for Context { // Dropped automatically } - fn buffer_destroy(&self, buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { - buffer.0.destroy(); + fn buffer_destroy(&self, _buffer: &Self::BufferId, buffer_data: &Self::BufferData) { + buffer_data.0.destroy(); } fn buffer_drop(&self, _buffer: &Self::BufferId, _buffer_data: &Self::BufferData) { // Dropped automatically } - fn texture_destroy(&self, texture: &Self::TextureId, _texture_data: &Self::TextureData) { - texture.0.destroy(); + fn texture_destroy(&self, _texture: &Self::TextureId, texture_data: &Self::TextureData) { + texture_data.0.destroy(); } fn 
texture_drop(&self, _texture: &Self::TextureId, _texture_data: &Self::TextureData) { @@ -1917,107 +1929,110 @@ impl crate::context::Context for Context { fn compute_pipeline_get_bind_group_layout( &self, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + _pipeline: &Self::ComputePipelineId, + pipeline_data: &Self::ComputePipelineData, index: u32, ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - ( - create_identified(pipeline.0.get_bind_group_layout(index)), - (), - ) + create_identified(pipeline_data.0.get_bind_group_layout(index)) } fn render_pipeline_get_bind_group_layout( &self, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::ComputePipelineData, + _pipeline: &Self::RenderPipelineId, + pipeline_data: &Self::RenderPipelineData, index: u32, ) -> (Self::BindGroupLayoutId, Self::BindGroupLayoutData) { - ( - create_identified(pipeline.0.get_bind_group_layout(index)), - (), - ) + create_identified(pipeline_data.0.get_bind_group_layout(index)) } fn command_encoder_copy_buffer_to_buffer( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, - source: &Self::BufferId, - _source_data: &Self::BufferData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, + _source: &Self::BufferId, + source_data: &Self::BufferData, source_offset: wgt::BufferAddress, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + _destination: &Self::BufferId, + destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, copy_size: wgt::BufferAddress, ) { - encoder.0.copy_buffer_to_buffer_with_f64_and_f64_and_f64( - &source.0, - source_offset as f64, - &destination.0, - destination_offset as f64, - copy_size as f64, - ) + encoder_data + .0 + .copy_buffer_to_buffer_with_f64_and_f64_and_f64( + &source_data.0, + source_offset as f64, + &destination_data.0, + destination_offset as f64, + copy_size as f64, + ) } fn command_encoder_copy_buffer_to_texture( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyBuffer, destination: crate::ImageCopyTexture, copy_size: wgt::Extent3d, ) { - encoder.0.copy_buffer_to_texture_with_gpu_extent_3d_dict( - &map_buffer_copy_view(source), - &map_texture_copy_view(destination), - &map_extent_3d(copy_size), - ) + encoder_data + .0 + .copy_buffer_to_texture_with_gpu_extent_3d_dict( + &map_buffer_copy_view(source), + &map_texture_copy_view(destination), + &map_extent_3d(copy_size), + ) } fn command_encoder_copy_texture_to_buffer( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture, destination: crate::ImageCopyBuffer, copy_size: wgt::Extent3d, ) { - encoder.0.copy_texture_to_buffer_with_gpu_extent_3d_dict( - &map_texture_copy_view(source), - &map_buffer_copy_view(destination), - &map_extent_3d(copy_size), - ) + encoder_data + .0 + .copy_texture_to_buffer_with_gpu_extent_3d_dict( + &map_texture_copy_view(source), + &map_buffer_copy_view(destination), + &map_extent_3d(copy_size), + ) } fn command_encoder_copy_texture_to_texture( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, source: crate::ImageCopyTexture, destination: crate::ImageCopyTexture, 
copy_size: wgt::Extent3d, ) { - encoder.0.copy_texture_to_texture_with_gpu_extent_3d_dict( - &map_texture_copy_view(source), - &map_texture_copy_view(destination), - &map_extent_3d(copy_size), - ) + encoder_data + .0 + .copy_texture_to_texture_with_gpu_extent_3d_dict( + &map_texture_copy_view(source), + &map_texture_copy_view(destination), + &map_extent_3d(copy_size), + ) } fn command_encoder_begin_compute_pass( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, desc: &crate::ComputePassDescriptor, ) -> (Self::ComputePassId, Self::ComputePassData) { let mut mapped_desc = web_sys::GpuComputePassDescriptor::new(); if let Some(label) = desc.label { mapped_desc.label(label); } - ( - create_identified(encoder.0.begin_compute_pass_with_descriptor(&mapped_desc)), - (), + create_identified( + encoder_data + .0 + .begin_compute_pass_with_descriptor(&mapped_desc), ) } @@ -2025,16 +2040,16 @@ impl crate::context::Context for Context { &self, _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, - pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, + _pass: &mut Self::ComputePassId, + pass_data: &mut Self::ComputePassData, ) { - pass.0.end(); + pass_data.0.end(); } fn command_encoder_begin_render_pass( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, desc: &crate::RenderPassDescriptor<'_, '_>, ) -> (Self::RenderPassId, Self::RenderPassData) { let mapped_color_attachments = desc @@ -2051,18 +2066,21 @@ impl crate::context::Context for Context { crate::LoadOp::Load => web_sys::GpuLoadOp::Load, }; + let view: &::TextureViewData = + downcast_ref(ca.view.data.as_ref()); + let mut mapped_color_attachment = web_sys::GpuRenderPassColorAttachment::new( load_value, map_store_op(ca.ops.store), - &<::TextureViewId>::from(ca.view.id).0, + &view.0, ); if let Some(cv) = clear_value { mapped_color_attachment.clear_value(&cv); } if let Some(rt) = ca.resolve_target { - let texture_view = - &<::TextureViewId>::from(rt.id).0; - mapped_color_attachment.resolve_target(texture_view); + let resolve_target_view: &::TextureViewData = + downcast_ref(rt.data.as_ref()); + mapped_color_attachment.resolve_target(&resolve_target_view.0); } mapped_color_attachment.store_op(map_store_op(ca.ops.store)); @@ -2107,10 +2125,10 @@ impl crate::context::Context for Context { } None => (web_sys::GpuLoadOp::Load, web_sys::GpuStoreOp::Store), }; + let dsa_view: &::TextureViewData = + downcast_ref(dsa.view.data.as_ref()); let mut mapped_depth_stencil_attachment = - web_sys::GpuRenderPassDepthStencilAttachment::new( - &<::TextureViewId>::from(dsa.view.id).0, - ); + web_sys::GpuRenderPassDepthStencilAttachment::new(&dsa_view.0); mapped_depth_stencil_attachment.depth_clear_value(depth_clear_value); mapped_depth_stencil_attachment.depth_load_op(depth_load_op); mapped_depth_stencil_attachment.depth_store_op(depth_store_op); @@ -2120,38 +2138,32 @@ impl crate::context::Context for Context { mapped_desc.depth_stencil_attachment(&mapped_depth_stencil_attachment); } - ( - create_identified(encoder.0.begin_render_pass(&mapped_desc)), - (), - ) + create_identified(encoder_data.0.begin_render_pass(&mapped_desc)) } fn command_encoder_end_render_pass( &self, _encoder: &Self::CommandEncoderId, _encoder_data: &Self::CommandEncoderData, - pass: &mut Self::RenderPassId, - _pass_data: &mut 
Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, ) { - pass.0.end(); + pass_data.0.end(); } fn command_encoder_finish( &self, - encoder: Self::CommandEncoderId, - _encoder_data: &mut Self::CommandEncoderData, + _encoder: Self::CommandEncoderId, + encoder_data: &mut Self::CommandEncoderData, ) -> (Self::CommandBufferId, Self::CommandBufferData) { - let label = encoder.0.label(); - ( - create_identified(if label.is_empty() { - encoder.0.finish() - } else { - let mut mapped_desc = web_sys::GpuCommandBufferDescriptor::new(); - mapped_desc.label(&label); - encoder.0.finish_with_descriptor(&mapped_desc) - }), - (), - ) + let label = encoder_data.0.label(); + create_identified(if label.is_empty() { + encoder_data.0.finish() + } else { + let mut mapped_desc = web_sys::GpuCommandBufferDescriptor::new(); + mapped_desc.label(&label); + encoder_data.0.finish_with_descriptor(&mapped_desc) + }) } fn command_encoder_clear_texture( @@ -2166,20 +2178,22 @@ impl crate::context::Context for Context { fn command_encoder_clear_buffer( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, buffer: &crate::Buffer, offset: wgt::BufferAddress, size: Option, ) { - let buffer_id = &<::BufferId>::from(buffer.id).0; + let buffer: &::BufferData = downcast_ref(buffer.data.as_ref()); match size { - Some(size) => { - encoder - .0 - .clear_buffer_with_f64_and_f64(buffer_id, offset as f64, size.get() as f64) - } - None => encoder.0.clear_buffer_with_f64(buffer_id, offset as f64), + Some(size) => encoder_data.0.clear_buffer_with_f64_and_f64( + &buffer.0, + offset as f64, + size.get() as f64, + ), + None => encoder_data + .0 + .clear_buffer_with_f64(&buffer.0, offset as f64), } } @@ -2214,77 +2228,76 @@ impl crate::context::Context for Context { fn command_encoder_write_timestamp( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, + _query_set: &Self::QuerySetId, + query_set_data: &Self::QuerySetData, query_index: u32, ) { - encoder.0.write_timestamp(&query_set.0, query_index); + encoder_data + .0 + .write_timestamp(&query_set_data.0, query_index); } fn command_encoder_resolve_query_set( &self, - encoder: &Self::CommandEncoderId, - _encoder_data: &Self::CommandEncoderData, - query_set: &Self::QuerySetId, - _query_set_data: &Self::QuerySetData, + _encoder: &Self::CommandEncoderId, + encoder_data: &Self::CommandEncoderData, + _query_set: &Self::QuerySetId, + query_set_data: &Self::QuerySetData, first_query: u32, query_count: u32, - destination: &Self::BufferId, - _destination_data: &Self::BufferData, + _destination: &Self::BufferId, + destination_data: &Self::BufferData, destination_offset: wgt::BufferAddress, ) { - encoder.0.resolve_query_set_with_u32( - &query_set.0, + encoder_data.0.resolve_query_set_with_u32( + &query_set_data.0, first_query, query_count, - &destination.0, + &destination_data.0, destination_offset as u32, ); } fn render_bundle_encoder_finish( &self, - encoder: Self::RenderBundleEncoderId, - _encoder_data: Self::CommandEncoderData, + _encoder: Self::RenderBundleEncoderId, + encoder_data: Self::RenderBundleEncoderData, desc: &crate::RenderBundleDescriptor, ) -> (Self::RenderBundleId, Self::RenderBundleData) { - ( - create_identified(match desc.label { - Some(label) => 
{ - let mut mapped_desc = web_sys::GpuRenderBundleDescriptor::new(); - mapped_desc.label(label); - encoder.0.finish_with_descriptor(&mapped_desc) - } - None => encoder.0.finish(), - }), - (), - ) + create_identified(match desc.label { + Some(label) => { + let mut mapped_desc = web_sys::GpuRenderBundleDescriptor::new(); + mapped_desc.label(label); + encoder_data.0.finish_with_descriptor(&mapped_desc) + } + None => encoder_data.0.finish(), + }) } fn queue_write_buffer( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _queue: &Self::QueueId, + queue_data: &Self::QueueData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, data: &[u8], ) { /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer - queue.0.write_buffer_with_f64_and_u8_array_and_f64_and_f64( - &buffer.0, + queue_data.0.write_buffer_with_f64_and_u8_array_and_f64_and_f64( + &buffer_data.0, offset as f64, data, 0f64, data.len() as f64, ); */ - queue + queue_data .0 .write_buffer_with_f64_and_buffer_source_and_f64_and_f64( - &buffer.0, + &buffer_data.0, offset as f64, &js_sys::Uint8Array::from(data).buffer(), 0f64, @@ -2296,12 +2309,12 @@ impl crate::context::Context for Context { &self, _queue: &Self::QueueId, _queue_data: &Self::QueueData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: wgt::BufferSize, ) -> Option<()> { - let usage = wgt::BufferUsages::from_bits_truncate(buffer.0.usage()); + let usage = wgt::BufferUsages::from_bits_truncate(buffer_data.0.usage()); // TODO: actually send this down the error scope if !usage.contains(wgt::BufferUsages::COPY_DST) { log::error!("Destination buffer is missing the `COPY_DST` usage flag"); @@ -2322,8 +2335,8 @@ impl crate::context::Context for Context { ); return None; } - if write_size + offset > buffer.0.size() as u64 { - log::error!("copy of {}..{} would end up overrunning the bounds of the destination buffer of size {}", offset, offset + write_size, buffer.0.size()); + if write_size + offset > buffer_data.0.size() as u64 { + log::error!("copy of {}..{} would end up overrunning the bounds of the destination buffer of size {}", offset, offset + write_size, buffer_data.0.size()); return None; } Some(()) @@ -2366,8 +2379,8 @@ impl crate::context::Context for Context { fn queue_write_texture( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + _queue: &Self::QueueId, + queue_data: &Self::QueueData, texture: crate::ImageCopyTexture, data: &[u8], data_layout: wgt::ImageDataLayout, @@ -2383,14 +2396,14 @@ impl crate::context::Context for Context { mapped_data_layout.offset(data_layout.offset as f64); /* Skip the copy once gecko allows BufferSource instead of ArrayBuffer - queue.0.write_texture_with_u8_array_and_gpu_extent_3d_dict( + queue_data.0.write_texture_with_u8_array_and_gpu_extent_3d_dict( &map_texture_copy_view(texture), data, &mapped_data_layout, &map_extent_3d(size), ); */ - queue + queue_data .0 .write_texture_with_buffer_source_and_gpu_extent_3d_dict( &map_texture_copy_view(texture), @@ -2402,13 +2415,13 @@ impl crate::context::Context for Context { fn queue_copy_external_image_to_texture( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + _queue: &Self::QueueId, + queue_data: &Self::QueueData, source: &wgt::ImageCopyExternalImage, dest: crate::ImageCopyTextureTagged, size: wgt::Extent3d, ) { - queue + 
queue_data .0 .copy_external_image_to_texture_with_gpu_extent_3d_dict( &map_external_texture_copy_view(source), @@ -2417,15 +2430,17 @@ impl crate::context::Context for Context { ); } - fn queue_submit>( + fn queue_submit>( &self, - queue: &Self::QueueId, - _queue_data: &Self::QueueData, + _queue: &Self::QueueId, + queue_data: &Self::QueueData, command_buffers: I, ) -> (Self::SubmissionIndex, Self::SubmissionIndexData) { - let temp_command_buffers = command_buffers.map(|i| i.0).collect::(); + let temp_command_buffers = command_buffers + .map(|(_, data)| data.0) + .collect::(); - queue.0.submit(&temp_command_buffers); + queue_data.0.submit(&temp_command_buffers); (Unused, ()) } @@ -2452,27 +2467,28 @@ impl crate::context::Context for Context { fn compute_pass_set_pipeline( &self, - pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - pipeline: &Self::ComputePipelineId, - _pipeline_data: &Self::ComputePipelineData, + _pass: &mut Self::ComputePassId, + pass_data: &mut Self::ComputePassData, + _pipeline: &Self::ComputePipelineId, + pipeline_data: &Self::ComputePipelineData, ) { - pass.0.set_pipeline(&pipeline.0) + pass_data.0.set_pipeline(&pipeline_data.0) } fn compute_pass_set_bind_group( &self, - pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, + _pass: &mut Self::ComputePassId, + pass_data: &mut Self::ComputePassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + _bind_group: &Self::BindGroupId, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { - pass.0 + pass_data + .0 .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( index, - &bind_group.0, + &bind_group_data.0, offsets, 0f64, offsets.len() as u32, @@ -2550,52 +2566,54 @@ impl crate::context::Context for Context { fn compute_pass_dispatch_workgroups( &self, - pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, + _pass: &mut Self::ComputePassId, + pass_data: &mut Self::ComputePassData, x: u32, y: u32, z: u32, ) { - pass.0 + pass_data + .0 .dispatch_workgroups_with_workgroup_count_y_and_workgroup_count_z(x, y, z); } fn compute_pass_dispatch_workgroups_indirect( &self, - pass: &mut Self::ComputePassId, - _pass_data: &mut Self::ComputePassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + _pass: &mut Self::ComputePassId, + pass_data: &mut Self::ComputePassData, + _indirect_buffer: &Self::BufferId, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - pass.0 - .dispatch_workgroups_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); + pass_data + .0 + .dispatch_workgroups_indirect_with_f64(&indirect_buffer_data.0, indirect_offset as f64); } fn render_bundle_encoder_set_pipeline( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, + _pipeline: &Self::RenderPipelineId, + pipeline_data: &Self::RenderPipelineData, ) { - encoder.0.set_pipeline(&pipeline.0); + encoder_data.0.set_pipeline(&pipeline_data.0); } fn render_bundle_encoder_set_bind_group( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, index: u32, - bind_group: 
&Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + _bind_group: &Self::BindGroupId, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { - encoder + encoder_data .0 .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( index, - &bind_group.0, + &bind_group_data.0, offsets, 0f64, offsets.len() as u32, @@ -2604,26 +2622,26 @@ impl crate::context::Context for Context { fn render_bundle_encoder_set_index_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { match size { Some(s) => { - encoder.0.set_index_buffer_with_f64_and_f64( - &buffer.0, + encoder_data.0.set_index_buffer_with_f64_and_f64( + &buffer_data.0, map_index_format(index_format), offset as f64, s.get() as f64, ); } None => { - encoder.0.set_index_buffer_with_f64( - &buffer.0, + encoder_data.0.set_index_buffer_with_f64( + &buffer_data.0, map_index_format(index_format), offset as f64, ); @@ -2633,27 +2651,27 @@ impl crate::context::Context for Context { fn render_bundle_encoder_set_vertex_buffer( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, slot: u32, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { match size { Some(s) => { - encoder.0.set_vertex_buffer_with_f64_and_f64( + encoder_data.0.set_vertex_buffer_with_f64_and_f64( slot, - &buffer.0, + &buffer_data.0, offset as f64, s.get() as f64, ); } None => { - encoder + encoder_data .0 - .set_vertex_buffer_with_f64(slot, &buffer.0, offset as f64); + .set_vertex_buffer_with_f64(slot, &buffer_data.0, offset as f64); } }; } @@ -2671,12 +2689,12 @@ impl crate::context::Context for Context { fn render_bundle_encoder_draw( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, vertices: Range, instances: Range, ) { - encoder + encoder_data .0 .draw_with_instance_count_and_first_vertex_and_first_instance( vertices.end - vertices.start, @@ -2688,13 +2706,13 @@ impl crate::context::Context for Context { fn render_bundle_encoder_draw_indexed( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, indices: Range, base_vertex: i32, instances: Range, ) { - encoder + encoder_data .0 .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( indices.end - indices.start, @@ -2707,28 +2725,28 @@ impl crate::context::Context for Context { fn render_bundle_encoder_draw_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, + _indirect_buffer: &Self::BufferId, + 
indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - encoder + encoder_data .0 - .draw_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); + .draw_indirect_with_f64(&indirect_buffer_data.0, indirect_offset as f64); } fn render_bundle_encoder_draw_indexed_indirect( &self, - encoder: &mut Self::RenderBundleEncoderId, - _encoder_data: &mut Self::RenderBundleEncoderData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + _encoder: &mut Self::RenderBundleEncoderId, + encoder_data: &mut Self::RenderBundleEncoderData, + _indirect_buffer: &Self::BufferId, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - encoder + encoder_data .0 - .draw_indexed_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); + .draw_indexed_indirect_with_f64(&indirect_buffer_data.0, indirect_offset as f64); } fn render_bundle_encoder_multi_draw_indirect( @@ -2789,27 +2807,28 @@ impl crate::context::Context for Context { fn render_pass_set_pipeline( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - pipeline: &Self::RenderPipelineId, - _pipeline_data: &Self::RenderPipelineData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, + _pipeline: &Self::RenderPipelineId, + pipeline_data: &Self::RenderPipelineData, ) { - pass.0.set_pipeline(&pipeline.0); + pass_data.0.set_pipeline(&pipeline_data.0); } fn render_pass_set_bind_group( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, index: u32, - bind_group: &Self::BindGroupId, - _bind_group_data: &Self::BindGroupData, + _bind_group: &Self::BindGroupId, + bind_group_data: &Self::BindGroupData, offsets: &[wgt::DynamicOffset], ) { - pass.0 + pass_data + .0 .set_bind_group_with_u32_array_and_f64_and_dynamic_offsets_data_length( index, - &bind_group.0, + &bind_group_data.0, offsets, 0f64, offsets.len() as u32, @@ -2818,26 +2837,26 @@ impl crate::context::Context for Context { fn render_pass_set_index_buffer( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, index_format: wgt::IndexFormat, offset: wgt::BufferAddress, size: Option, ) { match size { Some(s) => { - pass.0.set_index_buffer_with_f64_and_f64( - &buffer.0, + pass_data.0.set_index_buffer_with_f64_and_f64( + &buffer_data.0, map_index_format(index_format), offset as f64, s.get() as f64, ); } None => { - pass.0.set_index_buffer_with_f64( - &buffer.0, + pass_data.0.set_index_buffer_with_f64( + &buffer_data.0, map_index_format(index_format), offset as f64, ); @@ -2847,26 +2866,27 @@ impl crate::context::Context for Context { fn render_pass_set_vertex_buffer( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, slot: u32, - buffer: &Self::BufferId, - _buffer_data: &Self::BufferData, + _buffer: &Self::BufferId, + buffer_data: &Self::BufferData, offset: wgt::BufferAddress, size: Option, ) { match size { Some(s) => { - pass.0.set_vertex_buffer_with_f64_and_f64( + pass_data.0.set_vertex_buffer_with_f64_and_f64( slot, - &buffer.0, + &buffer_data.0, offset as f64, s.get() as f64, ); } None => { - pass.0 - .set_vertex_buffer_with_f64(slot, 
&buffer.0, offset as f64); + pass_data + .0 + .set_vertex_buffer_with_f64(slot, &buffer_data.0, offset as f64); } }; } @@ -2884,12 +2904,13 @@ impl crate::context::Context for Context { fn render_pass_draw( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, vertices: Range, instances: Range, ) { - pass.0 + pass_data + .0 .draw_with_instance_count_and_first_vertex_and_first_instance( vertices.end - vertices.start, instances.end - instances.start, @@ -2900,13 +2921,14 @@ impl crate::context::Context for Context { fn render_pass_draw_indexed( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, indices: Range, base_vertex: i32, instances: Range, ) { - pass.0 + pass_data + .0 .draw_indexed_with_instance_count_and_first_index_and_base_vertex_and_first_instance( indices.end - indices.start, instances.end - instances.start, @@ -2918,26 +2940,28 @@ impl crate::context::Context for Context { fn render_pass_draw_indirect( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, + _indirect_buffer: &Self::BufferId, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - pass.0 - .draw_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); + pass_data + .0 + .draw_indirect_with_f64(&indirect_buffer_data.0, indirect_offset as f64); } fn render_pass_draw_indexed_indirect( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - indirect_buffer: &Self::BufferId, - _indirect_buffer_data: &Self::BufferData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, + _indirect_buffer: &Self::BufferId, + indirect_buffer_data: &Self::BufferData, indirect_offset: wgt::BufferAddress, ) { - pass.0 - .draw_indexed_indirect_with_f64(&indirect_buffer.0, indirect_offset as f64); + pass_data + .0 + .draw_indexed_indirect_with_f64(&indirect_buffer_data.0, indirect_offset as f64); } fn render_pass_multi_draw_indirect( @@ -2998,30 +3022,31 @@ impl crate::context::Context for Context { fn render_pass_set_blend_constant( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, color: wgt::Color, ) { - pass.0 + pass_data + .0 .set_blend_constant_with_gpu_color_dict(&map_color(color)); } fn render_pass_set_scissor_rect( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, x: u32, y: u32, width: u32, height: u32, ) { - pass.0.set_scissor_rect(x, y, width, height); + pass_data.0.set_scissor_rect(x, y, width, height); } fn render_pass_set_viewport( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, x: f32, y: f32, width: f32, @@ -3029,17 +3054,18 @@ impl crate::context::Context for Context { min_depth: f32, max_depth: f32, ) { - pass.0 + pass_data + .0 .set_viewport(x, y, width, height, min_depth, max_depth); } fn render_pass_set_stencil_reference( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, + _pass: &mut Self::RenderPassId, + pass_data: 
&mut Self::RenderPassData, reference: u32, ) { - pass.0.set_stencil_reference(reference); + pass_data.0.set_stencil_reference(reference); } fn render_pass_insert_debug_marker( @@ -3103,14 +3129,16 @@ impl crate::context::Context for Context { fn render_pass_execute_bundles<'a>( &self, - pass: &mut Self::RenderPassId, - _pass_data: &mut Self::RenderPassData, - render_bundles: Box + 'a>, + _pass: &mut Self::RenderPassId, + pass_data: &mut Self::RenderPassData, + render_bundles: Box< + dyn Iterator + 'a, + >, ) { let mapped = render_bundles - .map(|bundle| bundle.0) + .map(|(_, bundle_data)| &bundle_data.0) .collect::(); - pass.0.execute_bundles(&mapped); + pass_data.0.execute_bundles(&mapped); } } diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index d45f2a6321..11a495aa7c 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -570,7 +570,7 @@ pub trait Context: Debug + Send + Sized + Sync { dest: crate::ImageCopyTextureTagged, size: wgt::Extent3d, ); - fn queue_submit>( + fn queue_submit>( &self, queue: &Self::QueueId, queue_data: &Self::QueueData, @@ -987,7 +987,9 @@ pub trait Context: Debug + Send + Sized + Sync { &self, pass: &mut Self::RenderPassId, pass_data: &mut Self::RenderPassData, - render_bundles: Box + 'a>, + render_bundles: Box< + dyn Iterator + 'a, + >, ); } @@ -1025,6 +1027,7 @@ impl ObjectId { } } + #[allow(dead_code)] pub fn id(&self) -> NonZeroU64 { self.id.unwrap() } @@ -1038,7 +1041,7 @@ impl ObjectId { static_assertions::assert_impl_all!(ObjectId: Send, Sync); -fn downcast_ref(data: &crate::Data) -> &T { +pub(crate) fn downcast_ref(data: &crate::Data) -> &T { strict_assert!(data.is::()); // Copied from std. unsafe { &*(data as *const dyn Any as *const T) } @@ -1501,7 +1504,7 @@ pub(crate) trait DynContext: Debug + Send + Sync { &self, queue: &ObjectId, queue_data: &crate::Data, - command_buffers: Box + 'a>, + command_buffers: Box)> + 'a>, ) -> (ObjectId, Arc); fn queue_get_timestamp_period(&self, queue: &ObjectId, queue_data: &crate::Data) -> f32; fn queue_on_submitted_work_done( @@ -1902,7 +1905,7 @@ pub(crate) trait DynContext: Debug + Send + Sync { &self, pass: &mut ObjectId, pass_data: &mut crate::Data, - render_bundles: Box + 'a>, + render_bundles: Box + 'a>, ); } @@ -2902,11 +2905,14 @@ where &self, queue: &ObjectId, queue_data: &crate::Data, - command_buffers: Box + 'a>, + command_buffers: Box)> + 'a>, ) -> (ObjectId, Arc) { let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); - let command_buffers = command_buffers.into_iter().map(::from); + let command_buffers = command_buffers.into_iter().map(|(id, data)| { + let command_buffer_data: ::CommandBufferData = *data.downcast().unwrap(); + (::from(id), command_buffer_data) + }); let (submission_index, data) = Context::queue_submit(self, &queue, queue_data, command_buffers); (submission_index.into(), Arc::new(data) as _) @@ -3843,15 +3849,14 @@ where &self, pass: &mut ObjectId, pass_data: &mut crate::Data, - render_bundles: Box + 'a>, + render_bundles: Box + 'a>, ) { let mut pass = ::from(*pass); let pass_data = downcast_mut::(pass_data); - let render_bundles = Box::new( - render_bundles - .into_iter() - .map(|id| ::from(*id)), - ); + let render_bundles = Box::new(render_bundles.into_iter().map(|(id, data)| { + let render_bundle_data: &::RenderBundleData = downcast_ref(data); + (::from(*id), render_bundle_data) + })); Context::render_pass_execute_bundles(self, &mut pass, pass_data, render_bundles) } } diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index 253262dc7e..8e77391d84 100644 
--- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -567,7 +567,7 @@ impl ComputePipeline { pub struct CommandBuffer { context: Arc, id: Option, - data: Box, + data: Option>, } static_assertions::assert_impl_all!(CommandBuffer: Send, Sync); @@ -575,7 +575,7 @@ impl Drop for CommandBuffer { fn drop(&mut self) { if !thread::panicking() { if let Some(ref id) = self.id { - self.context.command_buffer_drop(id, self.data.as_ref()); + self.context.command_buffer_drop(id, &self.data.take()); } } } @@ -2740,7 +2740,7 @@ impl CommandEncoder { CommandBuffer { context: Arc::clone(&self.context), id: Some(id), - data, + data: Some(data), } } @@ -3224,7 +3224,11 @@ impl<'a> RenderPass<'a> { &*self.parent.context, &mut self.id, self.data.as_mut(), - Box::new(render_bundles.into_iter().map(|rb| &rb.id)), + Box::new( + render_bundles + .into_iter() + .map(|rb| (&rb.id, rb.data.as_ref())), + ), ) } } @@ -4034,7 +4038,7 @@ impl Queue { Box::new( command_buffers .into_iter() - .map(|mut comb| comb.id.take().unwrap()), + .map(|mut comb| (comb.id.take().unwrap(), comb.data.take().unwrap())), ), ); From c6c8259e76cebe9f27425c8bd09d19c9f0857e7a Mon Sep 17 00:00:00 2001 From: grovesNL Date: Sat, 8 Apr 2023 00:50:05 -0230 Subject: [PATCH 017/132] Clippy --- wgpu/src/backend/web.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index ed2152414e..fd945db2d2 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -883,7 +883,7 @@ impl crate::context::Context for Context { .expect("expected to find single canvas") .into(); let canvas_element: web_sys::HtmlCanvasElement = canvas_node.into(); - Ok(self.instance_create_surface_from_canvas(&canvas_element)?) + self.instance_create_surface_from_canvas(&canvas_element) } fn instance_request_adapter( From 42b119927d718c1c7c9671949768e3b106709571 Mon Sep 17 00:00:00 2001 From: grovesNL Date: Sat, 8 Apr 2023 00:53:13 -0230 Subject: [PATCH 018/132] Changelog entry --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31d54440a5..b4e040964d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -170,6 +170,10 @@ By @cwfitzgerald in [#3610](https://github.com/gfx-rs/wgpu/pull/3610). - Fix metal erroring on an `array_stride` of 0. By @teoxoy in [#3538](https://github.com/gfx-rs/wgpu/pull/3538) +#### WebGPU + +- Avoid using `WasmAbi` functions for WebGPU backend. By @grovesNL in [#3657](https://github.com/gfx-rs/wgpu/pull/3657) + #### General - `copyTextureToTexture` src/dst aspects must both refer to all aspects of src/dst format. 
By @teoxoy in [#3431](https://github.com/gfx-rs/wgpu/pull/3431) From 96e1f502ad7aebdb9c21b24bea6f054446220efc Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 9 Apr 2023 09:42:18 +0200 Subject: [PATCH 019/132] Remove wrongly created license in subfolders --- wgpu-core/LICENSE.APACHE | 176 -------------------------------------- wgpu-core/LICENSE.MIT | 21 ----- wgpu-hal/LICENSE.APACHE | 176 -------------------------------------- wgpu-hal/LICENSE.MIT | 21 ----- wgpu-types/LICENSE.APACHE | 176 -------------------------------------- wgpu-types/LICENSE.MIT | 21 ----- wgpu/LICENSE.APACHE | 176 -------------------------------------- wgpu/LICENSE.MIT | 21 ----- 8 files changed, 788 deletions(-) delete mode 100644 wgpu-core/LICENSE.APACHE delete mode 100644 wgpu-core/LICENSE.MIT delete mode 100644 wgpu-hal/LICENSE.APACHE delete mode 100644 wgpu-hal/LICENSE.MIT delete mode 100644 wgpu-types/LICENSE.APACHE delete mode 100644 wgpu-types/LICENSE.MIT delete mode 100644 wgpu/LICENSE.APACHE delete mode 100644 wgpu/LICENSE.MIT diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE deleted file mode 100644 index d9a10c0d8e..0000000000 --- a/wgpu-core/LICENSE.APACHE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
From df9801b0c1cfc412398a850b86918ce53fe746e6 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 9 Apr 2023 10:50:32 +0200 Subject: [PATCH 020/132] Updating according to trace::Action::CreateTexture --- player/tests/data/clear-buffer-texture.ron | 2 +- player/tests/data/pipeline-statistics-query.ron | 2 +- player/tests/data/quad.ron | 7 +------ player/tests/data/zero-init-texture-binding.ron | 14 ++------------ .../tests/data/zero-init-texture-copytobuffer.ron | 2 +- .../tests/data/zero-init-texture-rendertarget.ron | 7 +------ 6 files changed, 7 insertions(+), 27 deletions(-) diff --git a/player/tests/data/clear-buffer-texture.ron b/player/tests/data/clear-buffer-texture.ron index c6879e31da..dad6a86175 100644 --- a/player/tests/data/clear-buffer-texture.ron +++ b/player/tests/data/clear-buffer-texture.ron @@ -20,7 +20,7 @@ ) ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1, Empty), None, ( label: Some("Output Texture"), size: ( width: 64, diff --git a/player/tests/data/pipeline-statistics-query.ron b/player/tests/data/pipeline-statistics-query.ron index 0975d8e126..203b992388 100644 --- a/player/tests/data/pipeline-statistics-query.ron +++ b/player/tests/data/pipeline-statistics-query.ron @@ -1,5 +1,5 @@ ( - features: 0x0000_0000_0000_0100, // PIPELINE_STATISTICS_QUERY + features: 0x0000_0000_0010_0000, // PIPELINE_STATISTICS_QUERY expectations: [ ( name: "Queried number of compute invocations is correct", diff --git a/player/tests/data/quad.ron b/player/tests/data/quad.ron index 563ba24b84..0a3962f605 100644 --- a/player/tests/data/quad.ron +++ b/player/tests/data/quad.ron @@ -17,7 +17,7 @@ ), data: "quad.wgsl", ), - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( label: Some("Output Texture"), size: ( width: 64, @@ -30,11 +30,6 @@ usage: 27, view_formats: [], )), - CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), - desc: (), - ), CreateBuffer( Id(0, 1, Empty), ( diff --git a/player/tests/data/zero-init-texture-binding.ron b/player/tests/data/zero-init-texture-binding.ron index e94255cfc3..f526bcf455 100644 --- a/player/tests/data/zero-init-texture-binding.ron +++ b/player/tests/data/zero-init-texture-binding.ron @@ -17,7 +17,7 @@ // MISSING: Partial views ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( label: Some("Sampled Texture"), size: ( width: 64, @@ -30,11 +30,6 @@ usage: 5, // SAMPLED + COPY_SRC view_formats: [], )), - CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), - desc: (), - ), CreateBuffer( Id(0, 1, Empty), ( @@ -44,7 +39,7 @@ mapped_at_creation: false, ), ), - CreateTexture(Id(1, 1, Empty), ( + CreateTexture(Id(1, 1, Empty), Some(Id(1, 1, Empty)), ( label: Some("Storage Texture"), size: ( width: 64, @@ -57,11 +52,6 @@ usage: 9, // STORAGE + COPY_SRC view_formats: [], )), - CreateTextureView( - id: Id(1, 1, Empty), - parent_id: Id(1, 1, Empty), - desc: (), - ), CreateBuffer( Id(1, 1, Empty), ( diff --git a/player/tests/data/zero-init-texture-copytobuffer.ron b/player/tests/data/zero-init-texture-copytobuffer.ron index 0bb16ccebb..5ff1cebf90 100644 --- a/player/tests/data/zero-init-texture-copytobuffer.ron +++ b/player/tests/data/zero-init-texture-copytobuffer.ron @@ -10,7 +10,7 @@ // MISSING: Partial copies ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( label: Some("Copy To Buffer Texture"), size: ( width: 64, diff --git 
a/player/tests/data/zero-init-texture-rendertarget.ron b/player/tests/data/zero-init-texture-rendertarget.ron index 831af942a2..5f90c72a6b 100644 --- a/player/tests/data/zero-init-texture-rendertarget.ron +++ b/player/tests/data/zero-init-texture-rendertarget.ron @@ -10,7 +10,7 @@ // MISSING: Partial view. ], actions: [ - CreateTexture(Id(0, 1, Empty), ( + CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( label: Some("Render Target Texture"), size: ( width: 64, @@ -23,11 +23,6 @@ usage: 17, // RENDER_ATTACHMENT + COPY_SRC view_formats: [], )), - CreateTextureView( - id: Id(0, 1, Empty), - parent_id: Id(0, 1, Empty), - desc: (), - ), CreateBuffer( Id(0, 1, Empty), ( From b9d3c597339246d400d137ed36fc5c8e4f7905de Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 9 Apr 2023 20:08:05 +0200 Subject: [PATCH 021/132] Moving set_id from registry to storage --- wgpu-core/src/registry.rs | 1 - wgpu-core/src/storage.rs | 17 ++++++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 2da9e35189..a2574f12c4 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -51,7 +51,6 @@ impl> FutureId<'_, I, T> { } pub fn assign(self, value: T) -> (id::Valid, Arc) { - value.info().set_id(self.id); self.data.write().insert(self.id, value); ( id::Valid(self.id), diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index 70cb30b5c5..576e843da7 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -44,13 +44,19 @@ pub(crate) struct InvalidId; /// values, so you should use an id allocator like `IdentityManager` /// that keeps the index values dense and close to zero. #[derive(Debug)] -pub struct Storage { +pub struct Storage +where + T: Resource, +{ pub(crate) map: Vec>, kind: &'static str, _phantom: PhantomData, } -impl ops::Index> for Storage { +impl ops::Index> for Storage +where + T: Resource, +{ type Output = Arc; fn index(&self, id: id::Valid) -> &Arc { self.get(id.0).unwrap() @@ -69,7 +75,10 @@ where } } -impl Storage { +impl Storage +where + T: Resource, +{ pub(crate) fn from_kind(kind: &'static str) -> Self { Self { map: Vec::new(), @@ -175,6 +184,7 @@ impl Storage { pub(crate) fn insert(&mut self, id: I, value: T) { let (index, epoch, _) = id.unzip(); + value.info().set_id(id); self.insert_impl(index as usize, Element::Occupied(Arc::new(value), epoch)) } @@ -185,6 +195,7 @@ impl Storage { pub(crate) fn force_replace(&mut self, id: I, value: T) { let (index, epoch, _) = id.unzip(); + value.info().set_id(id); self.map[index as usize] = Element::Occupied(Arc::new(value), epoch); } From 1c2112f303b5723013a6ea8225920edae8155928 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Tue, 11 Apr 2023 20:35:50 +0200 Subject: [PATCH 022/132] Expose cleare fence for device polling --- wgpu-core/src/command/clear.rs | 22 ++--- wgpu-core/src/command/query.rs | 14 +-- wgpu-core/src/command/render.rs | 3 +- wgpu-core/src/command/transfer.rs | 69 ++++++++------- wgpu-core/src/device/global.rs | 133 +++++++++++++++------------- wgpu-core/src/device/life.rs | 32 +++---- wgpu-core/src/device/queue.rs | 140 +++++++++++++++++------------- wgpu-core/src/device/resource.rs | 72 +++++++-------- wgpu-core/src/track/buffer.rs | 7 +- 9 files changed, 267 insertions(+), 225 deletions(-) diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index f42911492e..76016e46c9 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -86,18 +86,20 @@ impl 
Global { .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let buffer_guard = hub.buffers.read(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let (dst_buffer, dst_pending) = cmd_buf_data - .trackers - .buffers - .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST) - .ok_or(ClearError::InvalidBuffer(dst))?; + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + cmd_buf_data + .trackers + .buffers + .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST) + .ok_or(ClearError::InvalidBuffer(dst))? + }; let dst_raw = dst_buffer .raw .as_ref() @@ -142,7 +144,7 @@ impl Global { ), ); // actual hal barrier & operation - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_buffers(dst_barrier.into_iter()); @@ -166,8 +168,6 @@ impl Global { let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let texture_guard = hub.textures.read(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearTexture { @@ -180,7 +180,8 @@ impl Global { return Err(ClearError::MissingClearTextureFeature); } - let dst_texture = texture_guard + let dst_texture = hub + .textures .get(dst) .map_err(|_| ClearError::InvalidTexture(dst))?; @@ -220,6 +221,7 @@ impl Global { let device = &cmd_buf.device; let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(); + let texture_guard = hub.textures.read(); clear_texture( &*texture_guard, Valid(dst), diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index 60ea675fc9..b2dcfb180d 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -369,12 +369,14 @@ impl Global { .add_single(&*query_set_guard, query_set_id) .ok_or(QueryError::InvalidQuerySet(query_set_id))?; - let buffer_guard = hub.buffers.read(); - let (dst_buffer, dst_pending) = tracker - .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) - .ok_or(QueryError::InvalidBuffer(destination))?; - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + tracker + .buffers + .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) + .ok_or(QueryError::InvalidBuffer(destination))? 
+ }; + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if !dst_buffer.usage.contains(wgt::BufferUsages::QUERY_RESOLVE) { return Err(ResolveError::MissingBufferUsage.into()); diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 111ae31279..4f1deae37a 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -2145,12 +2145,11 @@ impl Global { (trackers, query_reset_state, pending_discard_init_fixups) }; - let cmb_guard = hub.command_buffers.read(); let query_set_guard = hub.query_sets.read(); let buffer_guard = hub.buffers.read(); let texture_guard = hub.textures.read(); - let cmd_buf = cmb_guard.get(encoder_id).unwrap(); + let cmd_buf = hub.command_buffers.get(encoder_id).unwrap(); let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); let (encoder, status, tracker, _, _) = cmd_buf_data.raw_mut(); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index af52b34591..096e2f0829 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -575,7 +575,6 @@ impl Global { let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let buffer_guard = hub.buffers.read(); let device = &cmd_buf.device; @@ -590,11 +589,14 @@ impl Global { }); } - let (src_buffer, src_pending) = cmd_buf_data - .trackers - .buffers - .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC) - .ok_or(TransferError::InvalidBuffer(source))?; + let (src_buffer, src_pending) = { + let buffer_guard = hub.buffers.read(); + cmd_buf_data + .trackers + .buffers + .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC) + .ok_or(TransferError::InvalidBuffer(source))? + }; let src_raw = src_buffer .raw .as_ref() @@ -603,13 +605,16 @@ impl Global { return Err(TransferError::MissingCopySrcUsageFlag.into()); } // expecting only a single barrier - let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); - - let (dst_buffer, dst_pending) = cmd_buf_data - .trackers - .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) - .ok_or(TransferError::InvalidBuffer(destination))?; + let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer)); + + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + cmd_buf_data + .trackers + .buffers + .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) + .ok_or(TransferError::InvalidBuffer(destination))? 
+ }; let dst_raw = dst_buffer .raw .as_ref() @@ -617,7 +622,7 @@ impl Global { if !dst_buffer.usage.contains(BufferUsages::COPY_DST) { return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into()); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if size % wgt::COPY_BUFFER_ALIGNMENT != 0 { return Err(TransferError::UnalignedCopySize(size).into()); @@ -729,7 +734,6 @@ impl Global { } let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = cmd_buf_data.raw_mut(); - let buffer_guard = hub.buffers.read(); let texture_guard = hub.textures.read(); let device = &cmd_buf.device; @@ -765,10 +769,13 @@ impl Global { &texture_guard, )?; - let (src_buffer, src_pending) = tracker - .buffers - .set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC) - .ok_or(TransferError::InvalidBuffer(source.buffer))?; + let (src_buffer, src_pending) = { + let buffer_guard = hub.buffers.read(); + tracker + .buffers + .set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC) + .ok_or(TransferError::InvalidBuffer(source.buffer))? + }; let src_raw = src_buffer .raw .as_ref() @@ -776,7 +783,7 @@ impl Global { if !src_buffer.usage.contains(BufferUsages::COPY_SRC) { return Err(TransferError::MissingCopySrcUsageFlag.into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); + let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer)); let dst_pending = tracker .textures @@ -881,7 +888,6 @@ impl Global { let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = cmd_buf_data.raw_mut(); - let buffer_guard = hub.buffers.read(); let texture_guard = hub.textures.read(); let device = &cmd_buf.device; @@ -946,14 +952,17 @@ impl Global { } let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture)); - let (dst_buffer, dst_pending) = tracker - .buffers - .set_single( - &*buffer_guard, - destination.buffer, - hal::BufferUses::COPY_DST, - ) - .ok_or(TransferError::InvalidBuffer(destination.buffer))?; + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + tracker + .buffers + .set_single( + &*buffer_guard, + destination.buffer, + hal::BufferUses::COPY_DST, + ) + .ok_or(TransferError::InvalidBuffer(destination.buffer))? + }; let dst_raw = dst_buffer .raw .as_ref() @@ -963,7 +972,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if !src_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index b8031c4101..a19c5731c9 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -311,7 +311,7 @@ impl Global { buffer_id: id::BufferId, ) -> Result<(), WaitIdleError> { let hub = A::hub(self); - let device_guard = hub.devices.read(); + let last_submission = { let buffer_guard = hub.buffers.write(); match buffer_guard.get(buffer_id) { @@ -320,7 +320,7 @@ impl Global { } }; - device_guard + hub.devices .get(device_id) .map_err(|_| DeviceError::Invalid)? 
.wait_for_submit(last_submission) @@ -481,7 +481,7 @@ impl Global { } let temp = queue::TempResource::Buffer(buffer.clone()); - let mut pending_writes = device.pending_writes.lock(); + let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); if pending_writes.dst_buffers.contains_key(&buffer_id) { pending_writes.temp_resources.push(temp); @@ -506,13 +506,13 @@ impl Global { log::debug!("buffer {:?} is dropped", buffer_id); let hub = A::hub(self); - let mut buffer_guard = hub.buffers.write(); let (last_submit_index, buffer) = { + let mut buffer_guard = hub.buffers.write(); match buffer_guard.get(buffer_id) { Ok(buffer) => { let last_submit_index = buffer.info.submission_index(); - (last_submit_index, buffer) + (last_submit_index, buffer.clone()) } Err(_) => { hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); @@ -523,18 +523,24 @@ impl Global { let device = &buffer.device; { - let mut life_lock = device.lock_life(); if device .pending_writes - .lock() + .read() .as_ref() .unwrap() .dst_buffers .contains_key(&buffer_id) { - life_lock.future_suspected_buffers.push(buffer.clone()); + device + .lock_life() + .future_suspected_buffers + .push(buffer.clone()); } else { - life_lock.suspected_resources.buffers.push(buffer.clone()); + device + .lock_life() + .suspected_resources + .buffers + .push(buffer.clone()); } } @@ -759,7 +765,7 @@ impl Global { resource::TextureInner::Native { ref raw } => { if !raw.is_none() { let temp = queue::TempResource::Texture(texture.clone(), clear_views); - let mut pending_writes = device.pending_writes.lock(); + let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); if pending_writes.dst_textures.contains_key(&texture_id) { pending_writes.temp_resources.push(temp); @@ -785,13 +791,13 @@ impl Global { log::debug!("texture {:?} is dropped", texture_id); let hub = A::hub(self); - let mut texture_guard = hub.textures.write(); let (last_submit_index, texture) = { + let mut texture_guard = hub.textures.write(); match texture_guard.get(texture_id) { Ok(texture) => { let last_submit_index = texture.info.submission_index(); - (last_submit_index, texture) + (last_submit_index, texture.clone()) } Err(_) => { hub.textures @@ -806,7 +812,7 @@ impl Global { let mut life_lock = device.lock_life(); if device .pending_writes - .lock() + .write() .as_ref() .unwrap() .dst_textures @@ -880,13 +886,13 @@ impl Global { log::debug!("texture view {:?} is dropped", texture_view_id); let hub = A::hub(self); - let mut texture_view_guard = hub.texture_views.write(); let (last_submit_index, view) = { + let mut texture_view_guard = hub.texture_views.write(); match texture_view_guard.get(texture_view_id) { Ok(view) => { let last_submit_index = view.info.submission_index(); - (last_submit_index, view) + (last_submit_index, view.clone()) } Err(_) => { hub.texture_views @@ -961,11 +967,11 @@ impl Global { log::debug!("sampler {:?} is dropped", sampler_id); let hub = A::hub(self); - let mut sampler_guard = hub.samplers.write(); let sampler = { + let mut sampler_guard = hub.samplers.write(); match sampler_guard.get(sampler_id) { - Ok(sampler) => sampler, + Ok(sampler) => sampler.clone(), Err(_) => { hub.samplers .unregister_locked(sampler_id, &mut *sampler_guard); @@ -1056,11 +1062,11 @@ impl Global { log::debug!("bind group layout {:?} is dropped", bind_group_layout_id); let hub = A::hub(self); - let mut bind_group_layout_guard = hub.bind_group_layouts.write(); let layout = { + let 
mut bind_group_layout_guard = hub.bind_group_layouts.write(); match bind_group_layout_guard.get(bind_group_layout_id) { - Ok(layout) => layout, + Ok(layout) => layout.clone(), Err(_) => { hub.bind_group_layouts .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); @@ -1126,10 +1132,11 @@ impl Global { log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id); let hub = A::hub(self); - let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let layout = { + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); match pipeline_layout_guard.get(pipeline_layout_id) { - Ok(layout) => layout, + Ok(layout) => layout.clone(), Err(_) => { hub.pipeline_layouts .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); @@ -1199,11 +1206,11 @@ impl Global { log::debug!("bind group {:?} is dropped", bind_group_id); let hub = A::hub(self); - let mut bind_group_guard = hub.bind_groups.write(); let bind_group = { + let mut bind_group_guard = hub.bind_groups.write(); match bind_group_guard.get(bind_group_id) { - Ok(bind_group) => bind_group, + Ok(bind_group) => bind_group.clone(), Err(_) => { hub.bind_groups .unregister_locked(bind_group_id, &mut *bind_group_guard); @@ -1473,11 +1480,11 @@ impl Global { profiling::scope!("RenderBundle::drop"); log::debug!("render bundle {:?} is dropped", render_bundle_id); let hub = A::hub(self); - let mut bundle_guard = hub.render_bundles.write(); let bundle = { + let mut bundle_guard = hub.render_bundles.write(); match bundle_guard.get(render_bundle_id) { - Ok(bundle) => bundle, + Ok(bundle) => bundle.clone(), Err(_) => { hub.render_bundles .unregister_locked(render_bundle_id, &mut *bundle_guard); @@ -1668,11 +1675,11 @@ impl Global { profiling::scope!("RenderPipeline::drop"); log::debug!("render pipeline {:?} is dropped", render_pipeline_id); let hub = A::hub(self); - let mut pipeline_guard = hub.render_pipelines.write(); let (pipeline, layout_id) = { + let mut pipeline_guard = hub.render_pipelines.write(); match pipeline_guard.get(render_pipeline_id) { - Ok(pipeline) => (pipeline, pipeline.layout_id), + Ok(pipeline) => (pipeline.clone(), pipeline.layout_id), Err(_) => { hub.render_pipelines .unregister_locked(render_pipeline_id, &mut *pipeline_guard); @@ -1788,11 +1795,11 @@ impl Global { profiling::scope!("ComputePipeline::drop"); log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id); let hub = A::hub(self); - let mut pipeline_guard = hub.compute_pipelines.write(); let (pipeline, layout_id) = { + let mut pipeline_guard = hub.compute_pipelines.write(); match pipeline_guard.get(compute_pipeline_id) { - Ok(pipeline) => (pipeline, pipeline.layout_id), + Ok(pipeline) => (pipeline.clone(), pipeline.layout_id), Err(_) => { hub.compute_pipelines .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); @@ -2082,10 +2089,13 @@ impl Global { } let hub = A::hub(self); - hub.devices + let device = hub + .devices .get(device_id) - .map_err(|_| DeviceError::Invalid)? - .maintain(hub, maintain)? + .map_err(|_| DeviceError::Invalid)?; + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + device.maintain(hub, fence, maintain)? 
}; closures.fire(); @@ -2118,7 +2128,9 @@ impl Global { } else { wgt::Maintain::Poll }; - let (cbs, queue_empty) = device.maintain(hub, maintain)?; + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + let (cbs, queue_empty) = device.maintain(hub, fence, maintain)?; all_queue_empty = all_queue_empty && queue_empty; // If the device's own `RefCount` is the only one left, and @@ -2213,7 +2225,7 @@ impl Global { // need to wait for submissions or triage them. We know we were // just polled, so `life_tracker.free_resources` is empty. debug_assert!(device.lock_life().queue_empty()); - device.pending_writes.lock().as_mut().unwrap().deactivate(); + device.pending_writes.write().as_mut().unwrap().deactivate(); let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); // Adapter is only referenced by the device and itself. @@ -2261,7 +2273,6 @@ impl Global { profiling::scope!("Buffer::map_async"); let hub = A::hub(self); - let buffer_guard = hub.buffers.read(); let (pub_usage, internal_use) = match op.host { HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ), @@ -2272,8 +2283,9 @@ impl Global { return Err((op, BufferAccessError::UnalignedRange)); } - let (device, buffer) = { - let buffer = buffer_guard + let buffer = { + let buffer = hub + .buffers .get(buffer_id) .map_err(|_| BufferAccessError::Invalid); @@ -2306,29 +2318,30 @@ impl Global { }, )); } - let mut map_state = buffer.map_state.lock(); - *map_state = match *map_state { - resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => { - return Err((op, BufferAccessError::AlreadyMapped)); - } - resource::BufferMapState::Waiting(_) => { - return Err((op, BufferAccessError::MapAlreadyPending)); - } - resource::BufferMapState::Idle => { - resource::BufferMapState::Waiting(resource::BufferPendingMapping { - range, - op, - _parent_buffer: buffer.clone(), - }) - } - }; + { + let map_state = &mut *buffer.map_state.lock(); + *map_state = match *map_state { + resource::BufferMapState::Init { .. } + | resource::BufferMapState::Active { .. 
} => { + return Err((op, BufferAccessError::AlreadyMapped)); + } + resource::BufferMapState::Waiting(_) => { + return Err((op, BufferAccessError::MapAlreadyPending)); + } + resource::BufferMapState::Idle => { + resource::BufferMapState::Waiting(resource::BufferPendingMapping { + range, + op, + _parent_buffer: buffer.clone(), + }) + } + }; + } log::debug!("Buffer {:?} map state -> Waiting", buffer_id); - let device = &buffer.device; - { - let mut trackers = device.as_ref().trackers.lock(); - + let mut trackers = buffer.device.as_ref().trackers.lock(); + let buffer_guard = hub.buffers.read(); trackers .buffers .set_single(&*buffer_guard, buffer_id, internal_use); @@ -2336,10 +2349,10 @@ impl Global { trackers.buffers.drain(); } - (device, buffer) + buffer }; - device.lock_life().map(buffer); + buffer.device.lock_life().map(&buffer); Ok(()) } @@ -2470,7 +2483,7 @@ impl Global { buffer: raw_buf, usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, }; - let mut pending_writes = device.pending_writes.lock(); + let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); unsafe { diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 85d31b0d91..fa14c1685f 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -508,7 +508,6 @@ impl LifetimeTracker { profiling::scope!("triage_suspected"); if !self.suspected_resources.render_bundles.is_empty() { - let mut render_bundles_locked = hub.render_bundles.write(); let mut trackers = trackers.lock(); while let Some(bundle) = self.suspected_resources.render_bundles.pop() { @@ -522,7 +521,7 @@ impl LifetimeTracker { if let Some(res) = hub .render_bundles - .unregister_locked(id.0, &mut *render_bundles_locked) + .unregister(id.0) { self.suspected_resources.add_render_bundle_scope(&res.used); } @@ -531,7 +530,6 @@ impl LifetimeTracker { } if !self.suspected_resources.bind_groups.is_empty() { - let mut bind_groups_locked = hub.bind_groups.write(); let mut trackers = trackers.lock(); while let Some(resource) = self.suspected_resources.bind_groups.pop() { @@ -545,7 +543,7 @@ impl LifetimeTracker { if let Some(res) = hub .bind_groups - .unregister_locked(id.0, &mut *bind_groups_locked) + .unregister(id.0) { self.suspected_resources.add_bind_group_states(&res.used); let bind_group_layout = @@ -567,7 +565,6 @@ impl LifetimeTracker { } if !self.suspected_resources.texture_views.is_empty() { - let mut texture_views_locked = hub.texture_views.write(); let mut trackers = trackers.lock(); let mut list = mem::take(&mut self.suspected_resources.texture_views); @@ -582,7 +579,7 @@ impl LifetimeTracker { if let Some(res) = hub .texture_views - .unregister_locked(id.0, &mut *texture_views_locked) + .unregister(id.0) { if let Some(parent_texture) = res.parent.as_ref() { self.suspected_resources @@ -603,7 +600,6 @@ impl LifetimeTracker { } if !self.suspected_resources.textures.is_empty() { - let mut textures_locked = hub.textures.write(); let mut trackers = trackers.lock(); for texture in self.suspected_resources.textures.drain(..) 
{ @@ -615,7 +611,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyTexture(id.0)); } - if let Some(res) = hub.textures.unregister_locked(id.0, &mut *textures_locked) { + if let Some(res) = hub.textures.unregister(id.0) { let submit_index = res.info.submission_index(); let non_referenced_resources = self .active @@ -638,7 +634,6 @@ impl LifetimeTracker { } if !self.suspected_resources.samplers.is_empty() { - let mut samplers_locked = hub.samplers.write(); let mut trackers = trackers.lock(); for sampler in self.suspected_resources.samplers.drain(..) { @@ -650,7 +645,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroySampler(id.0)); } - if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *samplers_locked) { + if let Some(res) = hub.samplers.unregister(id.0) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -664,7 +659,6 @@ impl LifetimeTracker { } if !self.suspected_resources.buffers.is_empty() { - let mut buffers_locked = hub.buffers.write(); let mut trackers = trackers.lock(); for buffer in self.suspected_resources.buffers.drain(..) { @@ -676,7 +670,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyBuffer(id.0)); } - if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *buffers_locked) { + if let Some(res) = hub.buffers.unregister(id.0) { let submit_index = res.info.submission_index(); if let resource::BufferMapState::Init { ref stage_buffer, .. @@ -696,7 +690,6 @@ impl LifetimeTracker { } if !self.suspected_resources.compute_pipelines.is_empty() { - let mut compute_pipelines_locked = hub.compute_pipelines.write(); let mut trackers = trackers.lock(); for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) { @@ -710,7 +703,7 @@ impl LifetimeTracker { if let Some(res) = hub .compute_pipelines - .unregister_locked(id.0, &mut *compute_pipelines_locked) + .unregister(id.0) { let submit_index = res.info.submission_index(); self.active @@ -725,7 +718,6 @@ impl LifetimeTracker { } if !self.suspected_resources.render_pipelines.is_empty() { - let mut render_pipelines_locked = hub.render_pipelines.write(); let mut trackers = trackers.lock(); for render_pipeline in self.suspected_resources.render_pipelines.drain(..) { @@ -739,7 +731,7 @@ impl LifetimeTracker { if let Some(res) = hub .render_pipelines - .unregister_locked(id.0, &mut *render_pipelines_locked) + .unregister(id.0) { let submit_index = res.info.submission_index(); self.active @@ -806,7 +798,6 @@ impl LifetimeTracker { } if !self.suspected_resources.query_sets.is_empty() { - let mut query_sets_locked = hub.query_sets.write(); let mut trackers = trackers.lock(); for query_set in self.suspected_resources.query_sets.drain(..) { @@ -817,7 +808,7 @@ impl LifetimeTracker { // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); if let Some(res) = hub .query_sets - .unregister_locked(id.0, &mut *query_sets_locked) + .unregister(id.0) { let submit_index = res.info.submission_index(); self.active @@ -873,18 +864,17 @@ impl LifetimeTracker { if self.ready_to_map.is_empty() { return Vec::new(); } - let mut buffers_locked = hub.buffers.write(); let mut pending_callbacks: Vec = Vec::with_capacity(self.ready_to_map.len()); let mut trackers = trackers.lock(); for buffer in self.ready_to_map.drain(..) 
{ let buffer_id = buffer.info.id(); - if buffer.is_unique() && trackers.buffers.remove_abandoned(buffer_id) { + if trackers.buffers.remove_abandoned(buffer_id) { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::debug!("Mapping request is dropped because the buffer is destroyed."); if let Some(buf) = hub .buffers - .unregister_locked(buffer_id.0, &mut *buffers_locked) + .unregister(buffer_id.0) { self.free_resources.buffers.push(buf); } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index d8fb0b4a63..eb26d7e1b0 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -385,7 +385,7 @@ impl Global { } { device .pending_writes - .lock() + .write() .as_mut() .unwrap() .consume(&device, Arc::new(staging_buffer)); @@ -401,7 +401,7 @@ impl Global { device .pending_writes - .lock() + .write() .as_mut() .unwrap() .consume(&device, Arc::new(staging_buffer)); @@ -461,7 +461,7 @@ impl Global { if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { device .pending_writes - .lock() + .write() .as_mut() .unwrap() .consume(&device, staging_buffer); @@ -477,7 +477,7 @@ impl Global { device .pending_writes - .lock() + .write() .as_mut() .unwrap() .consume(&device, staging_buffer); @@ -544,20 +544,21 @@ impl Global { ) -> Result<(), QueueWriteError> { let hub = A::hub(self); - let buffer_guard = hub.buffers.read(); - let mut trackers = device.trackers.lock(); - let (dst, transition) = trackers - .buffers - .set_single(&buffer_guard, buffer_id, hal::BufferUses::COPY_DST) - .ok_or(TransferError::InvalidBuffer(buffer_id))?; + let (dst, transition) = { + let buffer_guard = hub.buffers.read(); + trackers + .buffers + .set_single(&buffer_guard, buffer_id, hal::BufferUses::COPY_DST) + .ok_or(TransferError::InvalidBuffer(buffer_id))? + }; let dst_raw = dst .raw .as_ref() .ok_or(TransferError::InvalidBuffer(buffer_id))?; let src_buffer_size = staging_buffer.size; - self.queue_validate_write_buffer_impl(dst, buffer_id, buffer_offset, src_buffer_size)?; + self.queue_validate_write_buffer_impl(&dst, buffer_id, buffer_offset, src_buffer_size)?; dst.info.use_at( device @@ -576,8 +577,8 @@ impl Global { buffer: inner_buffer.as_ref().unwrap(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }) - .chain(transition.map(|pending| pending.into_hal(dst))); - let mut pending_writes = device.pending_writes.lock(); + .chain(transition.map(|pending| pending.into_hal(&dst))); + let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); unsafe { @@ -588,17 +589,12 @@ impl Global { region.into_iter(), ); } - - pending_writes - .dst_buffers - .insert(buffer_id, buffer_guard.get(buffer_id).unwrap().clone()); + let dst = hub.buffers.get(buffer_id).unwrap(); + pending_writes.dst_buffers.insert(buffer_id, dst.clone()); // Ensure the overwritten bytes are marked as initialized so // they don't need to be nulled prior to mapping or binding. 
{ - drop(buffer_guard); - - let dst = hub.buffers.get(buffer_id).unwrap(); dst.initialization_status .write() .drain(buffer_offset..(buffer_offset + src_buffer_size)); @@ -640,8 +636,8 @@ impl Global { return Ok(()); } - let texture_guard = hub.textures.read(); - let dst = texture_guard + let dst = hub + .textures .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(destination.texture))?; @@ -656,7 +652,7 @@ impl Global { let (hal_copy_size, array_layer_count) = validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; - let (selector, dst_base) = extract_texture_selector(destination, size, dst)?; + let (selector, dst_base) = extract_texture_selector(destination, size, &dst)?; if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -714,7 +710,7 @@ impl Global { let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; let mut trackers = device.trackers.lock(); - let mut pending_writes = device.pending_writes.lock(); + let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); @@ -739,6 +735,7 @@ impl Global { .drain(init_layer_range) .collect::>>() { + let texture_guard = hub.textures.read(); crate::command::clear_texture( &*texture_guard, id::Valid(destination.texture), @@ -763,11 +760,11 @@ impl Global { // `texture_guard.get` above ends in time for the `clear_texture` // call above. Since we've held `texture_guard` the whole time, we know // the texture hasn't gone away in the mean time, so we can unwrap. - let dst = texture_guard.get(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).unwrap(); let transition = trackers .textures .set_single( - dst, + &dst, destination.texture, selector, hal::TextureUses::COPY_DST, @@ -860,7 +857,7 @@ impl Global { }; unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); + encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); encoder.transition_buffers(iter::once(barrier)); encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions); } @@ -991,7 +988,7 @@ impl Global { extract_texture_selector(&destination.to_untagged(), &size, &dst)?; let mut trackers = device.trackers.lock(); - let mut pending_writes = device.pending_writes.lock(); + let mut pending_writes = device.pending_writes.write(); let encoder = pending_writes.as_mut().unwrap().activate(); // If the copy does not fully cover the layers, we need to initialize to @@ -1098,14 +1095,15 @@ impl Global { .get(queue_id) .map_err(|_| DeviceError::Invalid)?; + let mut fence = device.fence.write(); + let fence = fence.as_mut().unwrap(); + let submit_index = device .active_submission_index .fetch_add(1, Ordering::Relaxed) + 1; let mut active_executions = Vec::new(); let mut used_surface_textures = track::TextureUsageScope::new(); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); { let mut command_buffer_guard = hub.command_buffers.write(); @@ -1374,7 +1372,14 @@ impl Global { let texture_guard = hub.textures.read(); used_surface_textures.set_size(texture_guard.len()); - for (&id, texture) in pending_writes.dst_textures.iter() { + for (&id, texture) in device + .pending_writes + .read() + .as_ref() + .unwrap() + .dst_textures + .iter() + { match *texture.inner.as_ref().unwrap() { TextureInner::Native { raw: None } => { return 
Err(QueueSubmitError::DestroyedTexture(id)); @@ -1408,46 +1413,60 @@ impl Global { }); unsafe { - pending_writes + device + .pending_writes + .write() + .as_mut() + .unwrap() .command_encoder .transition_textures(texture_barriers); }; } } - let refs = pending_writes - .pre_submit() - .into_iter() - .chain( - active_executions - .iter() - .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), - ) - .collect::>(); - unsafe { - device - .queue - .as_ref() - .unwrap() - .submit( - &refs, - Some((device.fence.lock().as_mut().unwrap(), submit_index)), + { + let mut pending_writes = device.pending_writes.write(); + let pending_writes = pending_writes.as_mut().unwrap(); + let refs = pending_writes + .pre_submit() + .into_iter() + .chain( + active_executions + .iter() + .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), ) - .map_err(DeviceError::from)?; + .collect::>(); + unsafe { + device + .queue + .as_ref() + .unwrap() + .submit(&refs, Some((fence, submit_index))) + .map_err(DeviceError::from)?; + } } } profiling::scope!("cleanup"); - if let Some(pending_execution) = pending_writes.post_submit( - device.command_allocator.lock().as_mut().unwrap(), - device.raw.as_ref().unwrap(), - device.queue.as_ref().unwrap(), - ) { + if let Some(pending_execution) = + device.pending_writes.write().as_mut().unwrap().post_submit( + device.command_allocator.lock().as_mut().unwrap(), + device.raw.as_ref().unwrap(), + device.queue.as_ref().unwrap(), + ) + { active_executions.push(pending_execution); } // this will register the new submission to the life time tracker - let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources); + let mut pending_write_resources = mem::take( + &mut device + .pending_writes + .write() + .as_mut() + .unwrap() + .temp_resources, + ); device.lock_life().track_submission( submit_index, pending_write_resources.drain(..), @@ -1456,7 +1475,7 @@ impl Global { // This will schedule destruction of all resources that are no longer needed // by the user but used in the command stream, among other things. - let (closures, _) = match device.maintain(hub, wgt::Maintain::Poll) { + let (closures, _) = match device.maintain(hub, fence, wgt::Maintain::Poll) { Ok(closures) => closures, Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)), Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu), @@ -1465,7 +1484,12 @@ impl Global { // pending_write_resources has been drained, so it's empty, but we // want to retain its heap allocation. - pending_writes.temp_resources = pending_write_resources; + device + .pending_writes + .write() + .as_mut() + .unwrap() + .temp_resources = pending_write_resources; device.lock_life().post_submit(); (submit_index, closures) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index b5fd446bfa..99f39e108a 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -76,7 +76,7 @@ pub struct Device { pub(crate) command_allocator: Mutex>>, pub(crate) active_submission_index: AtomicU64, //SubmissionIndex, - pub(crate) fence: Mutex>, + pub(crate) fence: RwLock>, /// All live resources allocated with this [`Device`]. 
/// @@ -91,7 +91,7 @@ pub struct Device { pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, pub(crate) downlevel: wgt::DownlevelCapabilities, - pub(crate) pending_writes: Mutex>>, + pub(crate) pending_writes: RwLock>>, #[cfg(feature = "trace")] pub(crate) trace: Mutex>, } @@ -110,12 +110,12 @@ impl std::fmt::Debug for Device { impl Drop for Device { fn drop(&mut self) { let raw = self.raw.take().unwrap(); - let pending_writes = self.pending_writes.lock().take().unwrap(); + let pending_writes = self.pending_writes.write().take().unwrap(); pending_writes.dispose(&raw); self.command_allocator.lock().take().unwrap().dispose(&raw); unsafe { raw.destroy_buffer(self.zero_buffer.take().unwrap()); - raw.destroy_fence(self.fence.lock().take().unwrap()); + raw.destroy_fence(self.fence.write().take().unwrap()); raw.exit(self.queue.take().unwrap()); } } @@ -212,7 +212,7 @@ impl Device { info: ResourceInfo::new(""), command_allocator: Mutex::new(Some(com_alloc)), active_submission_index: AtomicU64::new(0), - fence: Mutex::new(Some(fence)), + fence: RwLock::new(Some(fence)), trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), temp_suspected: Mutex::new(life::SuspectedResources::new()), @@ -234,7 +234,7 @@ impl Device { limits: desc.limits.clone(), features: desc.features, downlevel, - pending_writes: Mutex::new(Some(pending_writes)), + pending_writes: RwLock::new(Some(pending_writes)), }) } @@ -258,28 +258,31 @@ impl Device { pub(crate) fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( &'this self, hub: &Hub, + fence: &A::Fence, maintain: wgt::Maintain, ) -> Result<(UserClosures, bool), WaitIdleError> { profiling::scope!("Device::maintain"); - let mut life_tracker = self.lock_life(); - - // Normally, `temp_suspected` exists only to save heap - // allocations: it's cleared at the start of the function - // call, and cleared by the end. But `Global::queue_submit` is - // fallible; if it exits early, it may leave some resources in - // `temp_suspected`. - life_tracker - .suspected_resources - .extend(&self.temp_suspected.lock()); - self.temp_suspected.lock().clear(); - - life_tracker.triage_suspected( - hub, - &self.trackers, - #[cfg(feature = "trace")] - self.trace.lock().as_mut(), - ); - life_tracker.triage_mapped(); + { + let mut life_tracker = self.lock_life(); + + // Normally, `temp_suspected` exists only to save heap + // allocations: it's cleared at the start of the function + // call, and cleared by the end. But `Global::queue_submit` is + // fallible; if it exits early, it may leave some resources in + // `temp_suspected`. + life_tracker + .suspected_resources + .extend(&self.temp_suspected.lock()); + self.temp_suspected.lock().clear(); + + life_tracker.triage_suspected( + hub, + &self.trackers, + #[cfg(feature = "trace")] + self.trace.lock().as_mut(), + ); + life_tracker.triage_mapped(); + } let last_done_index = if maintain.is_wait() { let index_to_wait_for = match maintain { @@ -294,11 +297,7 @@ impl Device { self.raw .as_ref() .unwrap() - .wait( - self.fence.lock().as_ref().unwrap(), - index_to_wait_for, - CLEANUP_WAIT_MS, - ) + .wait(fence, index_to_wait_for, CLEANUP_WAIT_MS) .map_err(DeviceError::from)? }; index_to_wait_for @@ -307,11 +306,12 @@ impl Device { self.raw .as_ref() .unwrap() - .get_fence_value(self.fence.lock().as_ref().unwrap()) + .get_fence_value(fence) .map_err(DeviceError::from)? 
} }; + let mut life_tracker = self.lock_life(); let submission_closures = life_tracker.triage_submissions( last_done_index, self.command_allocator.lock().as_mut().unwrap(), @@ -2990,7 +2990,7 @@ impl Device { self.raw .as_ref() .unwrap() - .get_fence_value(self.fence.lock().as_ref().unwrap()) + .get_fence_value(self.fence.read().as_ref().unwrap()) .map_err(DeviceError::from)? }; if last_done_index < submission_index { @@ -2999,7 +2999,7 @@ impl Device { self.raw .as_ref() .unwrap() - .wait(self.fence.lock().as_ref().unwrap(), submission_index, !0) + .wait(self.fence.read().as_ref().unwrap(), submission_index, !0) .map_err(DeviceError::from)? }; let closures = self.lock_life().triage_submissions( @@ -3073,12 +3073,12 @@ impl Device { /// Wait for idle and remove resources that we can, before we die. pub(crate) fn prepare_to_die(&self) { - self.pending_writes.lock().as_mut().unwrap().deactivate(); - let mut life_tracker = self.life_tracker.lock(); + self.pending_writes.write().as_mut().unwrap().deactivate(); + let mut life_tracker = self.lock_life(); let current_index = self.active_submission_index.load(Ordering::Relaxed); if let Err(error) = unsafe { self.raw.as_ref().unwrap().wait( - self.fence.lock().as_ref().unwrap(), + self.fence.read().as_ref().unwrap(), current_index, CLEANUP_WAIT_MS, ) diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index f3e25b8cd4..ff55ccb02e 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -252,6 +252,9 @@ impl BufferUsageScope { } } +pub(crate) type SetSingleResult = + Option<(Arc>, Option>)>; + /// Stores all buffer state within a command buffer or device. pub(crate) struct BufferTracker { start: Vec, @@ -360,7 +363,7 @@ impl BufferTracker { storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, - ) -> Option<(&'a Buffer, Option>)> { + ) -> SetSingleResult { let buffer = storage.get(id).ok()?; let (index32, epoch, _) = id.unzip(); @@ -389,7 +392,7 @@ impl BufferTracker { strict_assert!(self.temp.len() <= 1); - Some((buffer, self.temp.pop())) + Some((buffer.clone(), self.temp.pop())) } /// Sets the given state for all buffers in the given tracker. 
From ffa080292630d56c98468eeb7704d724d78f9702 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Wed, 12 Apr 2023 21:38:49 +0200 Subject: [PATCH 023/132] Fix format --- wgpu-core/src/device/life.rs | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index fa14c1685f..06cee6cbc1 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -519,10 +519,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyRenderBundle(id.0)); } - if let Some(res) = hub - .render_bundles - .unregister(id.0) - { + if let Some(res) = hub.render_bundles.unregister(id.0) { self.suspected_resources.add_render_bundle_scope(&res.used); } } @@ -541,10 +538,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyBindGroup(id.0)); } - if let Some(res) = hub - .bind_groups - .unregister(id.0) - { + if let Some(res) = hub.bind_groups.unregister(id.0) { self.suspected_resources.add_bind_group_states(&res.used); let bind_group_layout = hub.bind_group_layouts.get(res.layout_id.0).unwrap(); @@ -577,10 +571,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyTextureView(id.0)); } - if let Some(res) = hub - .texture_views - .unregister(id.0) - { + if let Some(res) = hub.texture_views.unregister(id.0) { if let Some(parent_texture) = res.parent.as_ref() { self.suspected_resources .textures @@ -701,10 +692,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyComputePipeline(id.0)); } - if let Some(res) = hub - .compute_pipelines - .unregister(id.0) - { + if let Some(res) = hub.compute_pipelines.unregister(id.0) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -729,10 +717,7 @@ impl LifetimeTracker { t.add(trace::Action::DestroyRenderPipeline(id.0)); } - if let Some(res) = hub - .render_pipelines - .unregister(id.0) - { + if let Some(res) = hub.render_pipelines.unregister(id.0) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -806,10 +791,7 @@ impl LifetimeTracker { log::debug!("Query set {:?} will be destroyed", id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); - if let Some(res) = hub - .query_sets - .unregister(id.0) - { + if let Some(res) = hub.query_sets.unregister(id.0) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -872,10 +854,7 @@ impl LifetimeTracker { if trackers.buffers.remove_abandoned(buffer_id) { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::debug!("Mapping request is dropped because the buffer is destroyed."); - if let Some(buf) = hub - .buffers - .unregister(buffer_id.0) - { + if let Some(buf) = hub.buffers.unregister(buffer_id.0) { self.free_resources.buffers.push(buf); } } else { From ea90aa4362b2f7dbc7e4742c1a494da366dadedd Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Apr 2023 10:16:27 +0200 Subject: [PATCH 024/132] Integrating (#3686) --- wgpu-core/src/device/resource.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 99f39e108a..d3cb316644 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1456,6 +1456,16 @@ impl Device { Some(wgt::Features::TEXTURE_BINDING_ARRAY), WritableStorage::No, ), + Bt::Texture { + multisampled: true, + sample_type: TextureSampleType::Float { filterable: true }, + .. 
+ } => { + return Err(binding_model::CreateBindGroupLayoutError::Entry { + binding: entry.binding, + error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, + }); + } Bt::Texture { .. } => ( Some(wgt::Features::TEXTURE_BINDING_ARRAY), WritableStorage::No, From 2f1867601d94c43f1576bf6ffcb30d51db8ed209 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Apr 2023 10:24:40 +0200 Subject: [PATCH 025/132] Fixing integration --- wgpu-core/src/device/mod.rs | 2 +- wgpu-core/src/device/resource.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index d0abc37f24..56789371b5 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -14,7 +14,7 @@ use arrayvec::ArrayVec; use hal::Device as _; use smallvec::SmallVec; use thiserror::Error; -use wgt::{BufferAddress, TextureFormat, TextureSampleType}; +use wgt::{BufferAddress, TextureFormat}; use std::{iter, num::NonZeroU32, ptr}; diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index d3cb316644..d6bb432610 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -34,7 +34,7 @@ use hal::{CommandEncoder as _, Device as _}; use parking_lot::{Mutex, MutexGuard, RwLock}; use smallvec::SmallVec; use thiserror::Error; -use wgt::{TextureFormat, TextureViewDimension}; +use wgt::{TextureFormat, TextureSampleType, TextureViewDimension}; use std::{ borrow::Cow, From de80b11ad283c7678e1847770bbe8e26d9d15332 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Apr 2023 11:38:36 +0200 Subject: [PATCH 026/132] Fixing wat on gles --- wgpu-core/src/device/resource.rs | 17 ++++++++++------- wgpu-hal/src/gles/device.rs | 32 ++++++++++++++++---------------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index d6bb432610..1987506646 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -2996,11 +2996,13 @@ impl Device { &self, submission_index: SubmissionIndex, ) -> Result<(), WaitIdleError> { + let fence = self.fence.read(); + let fence = fence.as_ref().unwrap(); let last_done_index = unsafe { self.raw .as_ref() .unwrap() - .get_fence_value(self.fence.read().as_ref().unwrap()) + .get_fence_value(fence) .map_err(DeviceError::from)? }; if last_done_index < submission_index { @@ -3009,7 +3011,7 @@ impl Device { self.raw .as_ref() .unwrap() - .wait(self.fence.read().as_ref().unwrap(), submission_index, !0) + .wait(fence, submission_index, !0) .map_err(DeviceError::from)? 
}; let closures = self.lock_life().triage_submissions( @@ -3087,11 +3089,12 @@ impl Device { let mut life_tracker = self.lock_life(); let current_index = self.active_submission_index.load(Ordering::Relaxed); if let Err(error) = unsafe { - self.raw.as_ref().unwrap().wait( - self.fence.read().as_ref().unwrap(), - current_index, - CLEANUP_WAIT_MS, - ) + let fence = self.fence.read(); + let fence = fence.as_ref().unwrap(); + self.raw + .as_ref() + .unwrap() + .wait(fence, current_index, CLEANUP_WAIT_MS) } { log::error!("failed to wait for the device: {:?}", error); } diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index 0a1cfaf241..c22570eb72 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -1280,27 +1280,27 @@ impl crate::Device for super::Device { } else { (timeout_ms as u64 * 1_000_000).min(!0u32 as u64) }; - let &(_, sync) = fence + if let Some(&(_, sync)) = fence .pending .iter() .find(|&&(value, _)| value >= wait_value) - .unwrap(); - match unsafe { - gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) - } { - // for some reason firefox returns WAIT_FAILED, to investigate - #[cfg(target_arch = "wasm32")] - glow::WAIT_FAILED => { - log::warn!("wait failed!"); - Ok(false) - } - glow::TIMEOUT_EXPIRED => Ok(false), - glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true), - _ => Err(crate::DeviceError::Lost), + { + return match unsafe { + gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) + } { + // for some reason firefox returns WAIT_FAILED, to investigate + #[cfg(target_arch = "wasm32")] + glow::WAIT_FAILED => { + log::warn!("wait failed!"); + Ok(false) + } + glow::TIMEOUT_EXPIRED => Ok(false), + glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true), + _ => Err(crate::DeviceError::Lost), + }; } - } else { - Ok(true) } + Ok(true) } unsafe fn start_capture(&self) -> bool { From e49aff2652f6a943e3eb00d8e0f1fc073da40753 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Apr 2023 12:30:37 +0200 Subject: [PATCH 027/132] Bumping extern libs --- Cargo.lock | 405 +++++++++++++++++++++++++++-------------------------- 1 file changed, 206 insertions(+), 199 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c609b258dc..fe16415368 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ "gimli", ] @@ -23,7 +23,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "once_cell", "version_check", ] @@ -54,9 +54,9 @@ checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -98,19 +98,18 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" @@ -120,7 +119,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -131,15 +130,15 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", "cfg-if", "libc", - "miniz_oxide 0.5.4", + "miniz_oxide 0.6.2", "object", "rustc-demangle", ] @@ -205,9 +204,9 @@ checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "bytemuck" @@ -220,13 +219,13 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" +checksum = "fdde5c9cd29ebd706ce1b35600920a33550e402fc998a2e53ad3b42c3c47a192" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.15", ] [[package]] @@ -243,12 +242,12 @@ checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "calloop" -version = "0.10.2" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eb0438b3c6d262395fe30e6de9a61beb57ea56290b00a07f227fe6e20cbf2" +checksum = "1a59225be45a478d772ce015d9743e49e92798ece9e34eda9a6aa2a6a7f40192" dependencies = [ "log", - "nix", + "nix 0.25.1", "slotmap", "thiserror", "vec_map", @@ -268,9 +267,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.77" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -289,9 +288,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -314,9 +313,9 @@ dependencies = [ [[package]] name = "cocoa-foundation" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318" +checksum = 
"931d3837c286f56e3c58423ce4eba12d08db2374461a785c86f672b08b5650d6" dependencies = [ "bitflags 1.3.2", "block", @@ -351,9 +350,9 @@ checksum = "bf43edc576402991846b093a7ca18a3477e0ef9c588cde84964b5d3e43016642" [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] @@ -396,9 +395,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core-graphics" @@ -448,9 +447,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -529,7 +528,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -540,7 +539,7 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -606,7 +605,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -668,7 +667,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -756,17 +755,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi", -] - [[package]] name = "errno" version = "0.3.1" @@ -806,9 +794,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -865,13 +853,13 @@ dependencies = [ [[package]] name = "foreign-types-macros" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8469d0d40519bc608ec6863f1cc88f3f1deee15913f2f3b3e573d81ed38cccc" +checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 2.0.15", ] [[package]] @@ -988,9 +976,9 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -1009,7 +997,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -1055,9 +1043,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", @@ -1066,9 +1054,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "gl_generator" @@ -1320,9 +1308,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -1343,31 +1331,32 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.5" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] name = "is-terminal" -version = "0.4.3" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", "io-lifetimes", - "rustix 0.36.8", - "windows-sys 0.45.0", + "rustix", + "windows-sys 0.48.0", ] [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jni-sys" @@ -1439,12 +1428,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "linux-raw-sys" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" - [[package]] name = "linux-raw-sys" version = "0.3.1" @@ -1487,9 +1470,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] @@ -1532,6 +1515,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "miniz_oxide" version = "0.7.1" @@ -1544,14 +1536,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1628,7 +1620,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -1652,6 +1644,19 @@ dependencies = [ "memoffset", ] +[[package]] +name = "nix" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset", +] + [[package]] name = "noise" version = "0.7.0" @@ -1664,9 +1669,9 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -1723,23 +1728,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.5.7" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" +checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.5.7" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" +checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -1769,9 +1774,9 @@ dependencies = [ [[package]] name = "object" -version = "0.29.0" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] @@ -1799,9 +1804,9 @@ checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -1811,7 +1816,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -1821,14 +1826,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.5", + "parking_lot_core 0.9.7", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = 
"60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -1840,15 +1845,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall 0.2.16", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1859,9 +1864,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", @@ -1890,7 +1895,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -1933,7 +1938,7 @@ checksum = "3894e5d549cccbe44afecf72922f277f603cd4bb0219c8342631ef18fffbe004" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -1972,20 +1977,19 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-crate" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "thiserror", - "toml", + "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.54" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -2096,9 +2100,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -2107,9 +2111,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "renderdoc-sys" @@ -2137,9 +2141,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc-hash" @@ -2167,37 +2171,23 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" -dependencies 
= [ - "bitflags 1.3.2", - "errno 0.2.8", - "io-lifetimes", - "libc", - "linux-raw-sys 0.1.4", - "windows-sys 0.45.0", -] - -[[package]] -name = "rustix" -version = "0.37.7" +version = "0.37.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aae838e49b3d63e9274e1c01833cc8139d3fec468c3b84688c628f44b1ae11d" +checksum = "85597d61f83914ddeba6a47b3b8ffe7365107221c2e557ed94426489fefb5f77" dependencies = [ "bitflags 1.3.2", - "errno 0.3.1", + "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.3.1", - "windows-sys 0.45.0", + "linux-raw-sys", + "windows-sys 0.48.0", ] [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safe_arch" @@ -2285,7 +2275,7 @@ checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -2308,7 +2298,7 @@ checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -2373,9 +2363,9 @@ checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -2407,7 +2397,7 @@ dependencies = [ "lazy_static", "log", "memmap2", - "nix", + "nix 0.24.3", "pkg-config", "wayland-client", "wayland-cursor", @@ -2463,9 +2453,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.105" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2474,9 +2464,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.11" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", @@ -2492,7 +2482,7 @@ dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.7", + "rustix", "windows-sys 0.45.0", ] @@ -2522,7 +2512,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -2593,16 +2583,24 @@ checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] -name = "toml" -version = "0.5.9" +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" + +[[package]] +name = "toml_edit" +version = "0.19.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" dependencies = [ - "serde", + "indexmap", + "toml_datetime", + "winnow", ] [[package]] @@ -2660,9 +2658,9 @@ checksum = "d70b6494226b36008c8366c288d77190b3fad2eb4c10533139c1c1f461127f1a" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -2722,7 +2720,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b55a3fef2a1e3b3a00ce878640918820d3c51081576ac657d23af9fc7928fdb" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "serde", ] @@ -2785,7 +2783,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -2821,7 +2819,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -2890,7 +2888,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn 1.0.105", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2998,7 +2996,7 @@ dependencies = [ "bitflags 1.3.2", "downcast-rs", "libc", - "nix", + "nix 0.24.3", "scoped-tls", "wayland-commons", "wayland-scanner", @@ -3011,7 +3009,7 @@ version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8691f134d584a33a6606d9d717b95c4fa20065605f798a3f350d78dced02a902" dependencies = [ - "nix", + "nix 0.24.3", "once_cell", "smallvec", "wayland-sys", @@ -3023,7 +3021,7 @@ version = "0.29.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6865c6b66f13d6257bef1cd40cbfe8ef2f150fb8ebbdb1e8e873455931377661" dependencies = [ - "nix", + "nix 0.24.3", "wayland-client", "xcursor", ] @@ -3262,7 +3260,7 @@ version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e745dab35a0c4c77aa3ce42d595e13d2003d6902d6b08c9ef5fc326d08da12b" dependencies = [ - "windows-targets 0.42.1", + "windows-targets 0.42.2", ] [[package]] @@ -3284,13 +3282,13 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm 0.42.1", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm 0.42.1", - "windows_x86_64_msvc 0.42.1", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -3299,7 +3297,7 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets 0.42.1", + "windows-targets 0.42.2", ] [[package]] @@ -3313,17 +3311,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm 0.42.1", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm 0.42.1", - "windows_x86_64_msvc 0.42.1", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -3343,9 +3341,9 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" @@ -3361,9 +3359,9 @@ checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" @@ -3379,9 +3377,9 @@ checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" @@ -3397,9 +3395,9 @@ checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" @@ -3415,9 +3413,9 @@ checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" @@ -3427,9 +3425,9 @@ checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" @@ -3445,9 +3443,9 @@ checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" @@ -3488,6 +3486,15 @@ dependencies = [ "x11-dl", ] +[[package]] +name = "winnow" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +dependencies = [ + "memchr", +] + [[package]] name = "wio" version = "0.2.2" @@ -3563,12 +3570,12 @@ dependencies = [ [[package]] name = "x11-dl" -version = "2.20.1" +version = "2.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1536d6965a5d4e573c7ef73a2c15ebcd0b2de3347bdf526c34c297c00ac40f0" +checksum = "38735924fedd5314a6e548792904ed8c6de6636285cb9fec04d5b1db85c1516f" dependencies = [ - "lazy_static", "libc", + "once_cell", "pkg-config", ] From 73fecaee3a74007de6f74c552c1f3124a9f303d8 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 16 Apr 2023 11:09:04 +0200 Subject: [PATCH 028/132] Active submission will drop only its own stuff --- wgpu-core/src/device/life.rs | 12 +++---- wgpu-core/src/device/queue.rs | 64 +++++++++++++++-------------------- 2 files changed, 33 insertions(+), 43 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 06cee6cbc1..ba27bc69c2 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -397,14 +397,11 @@ impl LifetimeTracker { //TODO: enable when `is_sorted_by_key` is stable //debug_assert!(self.active.is_sorted_by_key(|a| a.index)); - let done_count = self - .active - .iter() - .position(|a| a.index > last_done) - .unwrap_or(self.active.len()); - let mut work_done_closures = SmallVec::new(); - for a in self.active.drain(..done_count) { + //TODO: Substitute with drain_filter when available + let mut active_index = self.active.iter().position(|a| a.index == last_done); + while let Some(index) = active_index { + let a = self.active.remove(index); log::trace!("Active submission {} is done", a.index); self.free_resources.extend(a.last_resources); self.ready_to_map.extend(a.mapped); @@ -413,6 +410,7 @@ impl LifetimeTracker { command_allocator.release_encoder(raw); } work_done_closures.extend(a.work_done_closures); + active_index = self.active.iter().position(|a| a.index == last_done); } work_done_closures } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index eb26d7e1b0..d7c0c7e878 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1111,14 +1111,6 @@ impl Global { if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); - let buffer_guard = hub.buffers.write(); - let texture_guard = hub.textures.write(); - let texture_view_guard = hub.texture_views.read(); - let sampler_guard = hub.samplers.read(); - - //Note: locking the trackers has to be done after the storages - let mut trackers = device.trackers.lock(); - //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. @@ -1127,7 +1119,7 @@ impl Global { for &cmb_id in command_buffer_ids { // we reset the used surface textures every time we use // it, so make sure to set_size on it. 
- used_surface_textures.set_size(texture_guard.len()); + used_surface_textures.set_size(hub.textures.read().len()); #[allow(unused_mut)] let mut cmdbuf = match hub @@ -1213,6 +1205,7 @@ impl Global { } if should_extend { unsafe { + let texture_guard = hub.textures.read(); used_surface_textures .merge_single( &*texture_guard, @@ -1234,19 +1227,24 @@ impl Global { .push(texture_view.clone()); } } - for bg in cmd_buf_trackers.bind_groups.used_resources() { - bg.info.use_at(submit_index); - // We need to update the submission indices for the contained - // state-less (!) resources as well, so that they don't get - // deleted too early if the parent bind group goes out of scope. - for sub_id in bg.used.views.used() { - texture_view_guard[sub_id].info.use_at(submit_index); - } - for sub_id in bg.used.samplers.used() { - sampler_guard[sub_id].info.use_at(submit_index); - } - if bg.is_unique() { - device.temp_suspected.lock().bind_groups.push(bg.clone()); + { + let texture_view_guard = hub.texture_views.read(); + let sampler_guard = hub.samplers.read(); + + for bg in cmd_buf_trackers.bind_groups.used_resources() { + bg.info.use_at(submit_index); + // We need to update the submission indices for the contained + // state-less (!) resources as well, so that they don't get + // deleted too early if the parent bind group goes out of scope. + for sub_id in bg.used.views.used() { + texture_view_guard[sub_id].info.use_at(submit_index); + } + for sub_id in bg.used.samplers.used() { + sampler_guard[sub_id].info.use_at(submit_index); + } + if bg.is_unique() { + device.temp_suspected.lock().bind_groups.push(bg.clone()); + } } } // assert!(cmd_buf_trackers.samplers.is_empty()); @@ -1314,6 +1312,10 @@ impl Global { .map_err(DeviceError::from)? }; log::trace!("Stitching command buffer {:?} before submission", cmb_id); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + //Note: locking the trackers has to be done after the storages + let mut trackers = device.trackers.lock(); baked .initialize_buffer_memory(&mut *trackers, &*buffer_guard) .map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?; @@ -1458,15 +1460,10 @@ impl Global { active_executions.push(pending_execution); } + let mut pending_writes = device.pending_writes.write(); + let pending_writes = pending_writes.as_mut().unwrap(); // this will register the new submission to the life time tracker - let mut pending_write_resources = mem::take( - &mut device - .pending_writes - .write() - .as_mut() - .unwrap() - .temp_resources, - ); + let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources); device.lock_life().track_submission( submit_index, pending_write_resources.drain(..), @@ -1484,12 +1481,7 @@ impl Global { // pending_write_resources has been drained, so it's empty, but we // want to retain its heap allocation. 
- device - .pending_writes - .write() - .as_mut() - .unwrap() - .temp_resources = pending_write_resources; + pending_writes.temp_resources = pending_write_resources; device.lock_life().post_submit(); (submit_index, closures) From faf3efbb222b4ab211c183be6e6363b9c052bef5 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 16 Apr 2023 11:43:33 +0200 Subject: [PATCH 029/132] Increasing wait time for multithreaded test --- wgpu/examples/hello-compute/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu/examples/hello-compute/tests.rs b/wgpu/examples/hello-compute/tests.rs index 52e62d1c81..4802cee922 100644 --- a/wgpu/examples/hello-compute/tests.rs +++ b/wgpu/examples/hello-compute/tests.rs @@ -104,7 +104,7 @@ fn test_multithreaded_compute() { } for _ in 0..thread_count { - rx.recv_timeout(Duration::from_secs(10)) + rx.recv_timeout(Duration::from_secs(30)) .expect("A thread never completed."); } }, From a1f8651dec3009d5861a73851bd842dbbb5a2170 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 16 Apr 2023 13:11:59 +0200 Subject: [PATCH 030/132] Reverting test and reducing cleanup time --- wgpu-core/src/device/life.rs | 2 +- wgpu-core/src/device/mod.rs | 2 +- wgpu/examples/hello-compute/tests.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index ba27bc69c2..d09ff07c35 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -614,7 +614,7 @@ impl LifetimeTracker { { non_referenced_resources .texture_views - .extend(clear_views.iter().cloned().into_iter()); + .extend(clear_views.iter().cloned()); } non_referenced_resources.textures.push(res); } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 56789371b5..0d9e8120d2 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -31,7 +31,7 @@ pub const SHADER_STAGE_COUNT: usize = 3; // value is enough for a 16k texture with float4 format. 
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10; -const CLEANUP_WAIT_MS: u32 = 5000; +const CLEANUP_WAIT_MS: u32 = 1000; const IMPLICIT_FAILURE: &str = "failed implicit"; const EP_FAILURE: &str = "EP is invalid"; diff --git a/wgpu/examples/hello-compute/tests.rs b/wgpu/examples/hello-compute/tests.rs index 4802cee922..52e62d1c81 100644 --- a/wgpu/examples/hello-compute/tests.rs +++ b/wgpu/examples/hello-compute/tests.rs @@ -104,7 +104,7 @@ fn test_multithreaded_compute() { } for _ in 0..thread_count { - rx.recv_timeout(Duration::from_secs(30)) + rx.recv_timeout(Duration::from_secs(10)) .expect("A thread never completed."); } }, From 6bc0ecf0a219b6cf65a98928cbd3c8e2906e4e51 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Tue, 25 Apr 2023 19:29:58 +0200 Subject: [PATCH 031/132] Separating info from debug to get debug details --- .github/workflows/ci.yml | 1 + wgpu-core/src/binding_model.rs | 3 ++ wgpu-core/src/command/mod.rs | 1 + wgpu-core/src/device/global.rs | 80 +++++++++++++++++++---------- wgpu-core/src/device/life.rs | 20 ++++---- wgpu-core/src/device/queue.rs | 1 + wgpu-core/src/device/resource.rs | 3 +- wgpu-core/src/global.rs | 2 +- wgpu-core/src/instance.rs | 8 ++- wgpu-core/src/pipeline.rs | 3 ++ wgpu-core/src/present.rs | 1 + wgpu-core/src/resource.rs | 21 ++++++++ wgpu-hal/examples/halmark/main.rs | 4 +- wgpu-hal/src/auxil/dxgi/factory.rs | 10 ++-- wgpu-hal/src/dx11/library.rs | 4 +- wgpu-hal/src/dx12/device.rs | 2 +- wgpu-hal/src/dx12/instance.rs | 2 +- wgpu-hal/src/gles/adapter.rs | 8 +-- wgpu-hal/src/gles/device.rs | 6 +-- wgpu-hal/src/gles/egl.rs | 36 ++++++------- wgpu-hal/src/metal/surface.rs | 2 +- wgpu-hal/src/vulkan/instance.rs | 14 ++--- wgpu/examples/hello-compute/main.rs | 4 +- 23 files changed, 150 insertions(+), 86 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b174f38abd..17a605bf5b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,6 +11,7 @@ env: CARGO_INCREMENTAL: false CARGO_TERM_COLOR: always RUST_BACKTRACE: full + RUST_LOG: info #needed to understand what's going on when tests fail MSRV: 1.64 PKG_CONFIG_ALLOW_CROSS: 1 # allow android to work RUSTFLAGS: --cfg=web_sys_unstable_apis -D warnings diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index c1ed69f51f..120de883e3 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -460,6 +460,7 @@ pub struct BindGroupLayout { impl Drop for BindGroupLayout { fn drop(&mut self) { + log::info!("Destroying BindGroupLayout {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -595,6 +596,7 @@ pub struct PipelineLayout { impl Drop for PipelineLayout { fn drop(&mut self) { + log::info!("Destroying PipelineLayout {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -812,6 +814,7 @@ pub struct BindGroup { impl Drop for BindGroup { fn drop(&mut self) { + log::info!("Destroying BindGroup {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 28f2a9912c..8619351c60 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -152,6 +152,7 @@ pub struct CommandBuffer { impl Drop for CommandBuffer { fn drop(&mut self) { + log::info!("Destroying CommandBuffer {:?}", self.info.label()); if self.data.lock().is_none() { return; } diff --git a/wgpu-core/src/device/global.rs 
b/wgpu-core/src/device/global.rs index a19c5731c9..4ba6e3fc92 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -244,7 +244,7 @@ impl Global { }; let (id, resource) = fid.assign(buffer); - log::info!("Created buffer {:?} with {:?}", id, desc); + log::info!("Created Buffer {:?} with {:?}", id, desc); device .trackers @@ -453,7 +453,7 @@ impl Global { //TODO: lock pending writes separately, keep the device read-only - log::info!("Buffer {:?} is destroyed", buffer_id); + log::debug!("Buffer {:?} is destroyed", buffer_id); let buffer = hub .buffers .get(buffer_id) @@ -503,7 +503,7 @@ impl Global { pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { profiling::scope!("Buffer::drop"); - log::debug!("buffer {:?} is dropped", buffer_id); + log::debug!("buffer {:?} is asked to be dropped", buffer_id); let hub = A::hub(self); @@ -737,7 +737,7 @@ impl Global { let hub = A::hub(self); - log::info!("Buffer {:?} is destroyed", texture_id); + log::debug!("Texture {:?} is destroyed", texture_id); let texture = hub .textures .get(texture_id) @@ -788,7 +788,7 @@ impl Global { pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { profiling::scope!("Texture::drop"); - log::debug!("texture {:?} is dropped", texture_id); + log::debug!("texture {:?} is asked to be dropped", texture_id); let hub = A::hub(self); @@ -864,7 +864,7 @@ impl Global { Err(e) => break e, }; let (id, resource) = fid.assign(view); - + log::info!("Created TextureView {:?}", id); device.trackers.lock().views.insert_single(id, resource); return (id.0, None); }; @@ -883,7 +883,7 @@ impl Global { wait: bool, ) -> Result<(), resource::TextureViewDestroyError> { profiling::scope!("TextureView::drop"); - log::debug!("texture view {:?} is dropped", texture_view_id); + log::debug!("texture view {:?} is asked to be dropped", texture_view_id); let hub = A::hub(self); @@ -948,7 +948,7 @@ impl Global { }; let (id, resource) = fid.assign(sampler); - + log::info!("Created Sampler {:?}", id); device.trackers.lock().samplers.insert_single(id, resource); return (id.0, None); @@ -964,7 +964,7 @@ impl Global { pub fn sampler_drop(&self, sampler_id: id::SamplerId) { profiling::scope!("Sampler::drop"); - log::debug!("sampler {:?} is dropped", sampler_id); + log::debug!("sampler {:?} is asked to be dropped", sampler_id); let hub = A::hub(self); @@ -1046,6 +1046,7 @@ impl Global { }; let (id, _) = fid.assign(layout); + log::info!("Created BindGroupLayout {:?}", id); return (id.0, None); }; @@ -1059,7 +1060,10 @@ impl Global { pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { profiling::scope!("BindGroupLayout::drop"); - log::debug!("bind group layout {:?} is dropped", bind_group_layout_id); + log::debug!( + "bind group layout {:?} is asked to be dropped", + bind_group_layout_id + ); let hub = A::hub(self); @@ -1116,6 +1120,7 @@ impl Global { }; let (id, _) = fid.assign(layout); + log::info!("Created PipelineLayout {:?}", id); return (id.0, None); }; @@ -1129,7 +1134,10 @@ impl Global { pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { profiling::scope!("PipelineLayout::drop"); - log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id); + log::debug!( + "pipeline layout {:?} is asked to be dropped", + pipeline_layout_id + ); let hub = A::hub(self); @@ -1183,7 +1191,7 @@ impl Global { Err(e) => break e, }; let (id, resource) = fid.assign(bind_group); - log::debug!("Bind group {:?}", id,); + log::debug!("Created BindGroup {:?}", id,); 
device .trackers @@ -1203,7 +1211,7 @@ impl Global { pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { profiling::scope!("BindGroup::drop"); - log::debug!("bind group {:?} is dropped", bind_group_id); + log::debug!("bind group {:?} is asked to be dropped", bind_group_id); let hub = A::hub(self); @@ -1276,6 +1284,7 @@ impl Global { Err(e) => break e, }; let (id, _) = fid.assign(shader); + log::info!("Created ShaderModule {:?} with {:?}", id, desc); return (id.0, None); }; @@ -1326,6 +1335,7 @@ impl Global { Err(e) => break e, }; let (id, _) = fid.assign(shader); + log::info!("Created ShaderModule {:?} with {:?}", id, desc); return (id.0, None); }; @@ -1339,7 +1349,10 @@ impl Global { pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { profiling::scope!("ShaderModule::drop"); - log::debug!("shader module {:?} is dropped", shader_module_id); + log::debug!( + "shader module {:?} is asked to be dropped", + shader_module_id + ); let hub = A::hub(self); hub.shader_modules.unregister(shader_module_id); @@ -1380,6 +1393,7 @@ impl Global { ); let (id, _) = fid.assign(command_buffer); + log::info!("Created CommandBuffer {:?} with {:?}", id, desc); return (id.0, None); }; @@ -1393,7 +1407,10 @@ impl Global { pub fn command_encoder_drop(&self, command_encoder_id: id::CommandEncoderId) { profiling::scope!("CommandEncoder::drop"); - log::debug!("command encoder {:?} is dropped", command_encoder_id); + log::debug!( + "command encoder {:?} is asked to be dropped", + command_encoder_id + ); let hub = A::hub(self); @@ -1405,7 +1422,10 @@ impl Global { pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { profiling::scope!("CommandBuffer::drop"); - log::debug!("command buffer {:?} is dropped", command_buffer_id); + log::debug!( + "command buffer {:?} is asked to be dropped", + command_buffer_id + ); self.command_encoder_drop::(command_buffer_id) } @@ -1461,9 +1481,8 @@ impl Global { Err(e) => break e, }; - log::debug!("Render bundle"); let (id, resource) = fid.assign(render_bundle); - + log::info!("Created RenderBundle {:?}", id); device.trackers.lock().bundles.insert_single(id, resource); return (id.0, None); }; @@ -1478,7 +1497,10 @@ impl Global { pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { profiling::scope!("RenderBundle::drop"); - log::debug!("render bundle {:?} is dropped", render_bundle_id); + log::debug!( + "render bundle {:?} is asked to be dropped", + render_bundle_id + ); let hub = A::hub(self); let bundle = { @@ -1532,7 +1554,7 @@ impl Global { }; let (id, resource) = fid.assign(query_set); - + log::info!("Created QuerySet {:?}", id); device .trackers .lock() @@ -1548,7 +1570,7 @@ impl Global { pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { profiling::scope!("QuerySet::drop"); - log::debug!("query set {:?} is dropped", query_set_id); + log::debug!("query set {:?} is asked to be dropped", query_set_id); let hub = A::hub(self); let query_set_guard = hub.query_sets.read(); @@ -1615,7 +1637,7 @@ impl Global { }; let (id, resource) = fid.assign(pipeline); - log::info!("Created render pipeline {:?} with {:?}", id, desc); + log::info!("Created RenderPipeline {:?} with {:?}", id, desc); device .trackers @@ -1673,7 +1695,10 @@ impl Global { pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { profiling::scope!("RenderPipeline::drop"); - log::debug!("render pipeline {:?} is dropped", render_pipeline_id); + log::debug!( + "render pipeline {:?} is asked to be dropped", + 
render_pipeline_id + ); let hub = A::hub(self); let (pipeline, layout_id) = { @@ -1734,7 +1759,7 @@ impl Global { }; let (id, resource) = fid.assign(pipeline); - log::info!("Created compute pipeline {:?} with {:?}", id, desc); + log::info!("Created ComputePipeline {:?} with {:?}", id, desc); device .trackers @@ -1793,7 +1818,10 @@ impl Global { pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { profiling::scope!("ComputePipeline::drop"); - log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id); + log::debug!( + "compute pipeline {:?} is asked to be dropped", + compute_pipeline_id + ); let hub = A::hub(self); let (pipeline, layout_id) = { @@ -2210,7 +2238,7 @@ impl Global { pub fn device_drop(&self, device_id: DeviceId) { profiling::scope!("Device::drop"); - log::debug!("device {:?} is dropped", device_id); + log::debug!("device {:?} is asked to be dropped", device_id); } /// Exit the unreferenced, inactive device `device_id`. diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index d09ff07c35..d7af6923b2 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -511,7 +511,7 @@ impl LifetimeTracker { while let Some(bundle) = self.suspected_resources.render_bundles.pop() { let id = bundle.info.id(); if trackers.bundles.remove_abandoned(id) { - log::debug!("Bundle {:?} will be destroyed", id); + log::info!("Bundle {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyRenderBundle(id.0)); @@ -530,7 +530,7 @@ impl LifetimeTracker { while let Some(resource) = self.suspected_resources.bind_groups.pop() { let id = resource.info.id(); if trackers.bind_groups.remove_abandoned(id) { - log::debug!("Bind group {:?} will be destroyed", id); + log::info!("BindGroup {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroup(id.0)); @@ -563,7 +563,7 @@ impl LifetimeTracker { for texture_view in list.drain(..) { let id = texture_view.info.id(); if trackers.views.remove_abandoned(id) { - log::debug!("Texture view {:?} will be destroyed", id); + log::info!("TextureView {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyTextureView(id.0)); @@ -594,7 +594,7 @@ impl LifetimeTracker { for texture in self.suspected_resources.textures.drain(..) { let id = texture.info.id(); if trackers.textures.remove_abandoned(id) { - log::debug!("Texture {:?} will be destroyed", id); + log::info!("Texture {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyTexture(id.0)); @@ -628,7 +628,7 @@ impl LifetimeTracker { for sampler in self.suspected_resources.samplers.drain(..) { let id = sampler.info.id(); if trackers.samplers.remove_abandoned(id) { - log::debug!("Sampler {:?} will be destroyed", id); + log::info!("Sampler {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroySampler(id.0)); @@ -653,7 +653,7 @@ impl LifetimeTracker { for buffer in self.suspected_resources.buffers.drain(..) 
{ let id = buffer.info.id(); if trackers.buffers.remove_abandoned(id) { - log::debug!("Buffer {:?} will be destroyed", id); + log::info!("Buffer {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBuffer(id.0)); @@ -684,7 +684,7 @@ impl LifetimeTracker { for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) { let id = compute_pipeline.info.id(); if trackers.compute_pipelines.remove_abandoned(id) { - log::debug!("Compute pipeline {:?} will be destroyed", id); + log::info!("ComputePipeline {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyComputePipeline(id.0)); @@ -709,7 +709,7 @@ impl LifetimeTracker { for render_pipeline in self.suspected_resources.render_pipelines.drain(..) { let id = render_pipeline.info.id(); if trackers.render_pipelines.remove_abandoned(id) { - log::debug!("Render pipeline {:?} will be destroyed", id); + log::info!("RenderPipeline {:?} is removed from trackers", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyRenderPipeline(id.0)); @@ -786,7 +786,7 @@ impl LifetimeTracker { for query_set in self.suspected_resources.query_sets.drain(..) { let id = query_set.info.id(); if trackers.query_sets.remove_abandoned(id) { - log::debug!("Query set {:?} will be destroyed", id); + log::info!("QuerySet {:?} is removed from trackers", id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); if let Some(res) = hub.query_sets.unregister(id.0) { @@ -851,7 +851,7 @@ impl LifetimeTracker { let buffer_id = buffer.info.id(); if trackers.buffers.remove_abandoned(buffer_id) { *buffer.map_state.lock() = resource::BufferMapState::Idle; - log::debug!("Mapping request is dropped because the buffer is destroyed."); + log::info!("Buffer {:?} is removed from trackers", buffer_id); if let Some(buf) = hub.buffers.unregister(buffer_id.0) { self.free_resources.buffers.push(buf); } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index d7c0c7e878..96f52135b8 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -427,6 +427,7 @@ impl Global { let fid = hub.staging_buffers.prepare(id_in); let (id, _) = fid.assign(staging_buffer); + log::info!("Created StagingBuffer {:?}", id); Ok((id.0, staging_buffer_ptr)) } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 1987506646..37a4d3c607 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -109,6 +109,7 @@ impl std::fmt::Debug for Device { impl Drop for Device { fn drop(&mut self) { + log::info!("Destroying Device {:?}", self.info.label()); let raw = self.raw.take().unwrap(); let pending_writes = self.pending_writes.write().take().unwrap(); pending_writes.dispose(&raw); @@ -2502,7 +2503,7 @@ impl Device { .iter() .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) } { - log::info!("Color targets: {:?}", color_targets); + log::debug!("Color targets: {:?}", color_targets); self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?; } diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index b57cfcc1f6..bfd77b6d6e 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -123,7 +123,7 @@ impl Global { impl Drop for Global { fn drop(&mut self) { profiling::scope!("Global::drop"); - log::info!("Dropping Global"); + 
log::info!("Destroying Global"); let mut surfaces_locked = self.surfaces.write(); // destroy hubs before the instance gets dropped diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index fc479aca78..308444df02 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -928,7 +928,7 @@ impl Global { let fid = A::hub(self).adapters.prepare(input); - match A::VARIANT { + let id = match A::VARIANT { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] @@ -940,7 +940,9 @@ impl Global { #[cfg(feature = "gles")] Backend::Gl => fid.assign(Adapter::new(hal_adapter)).0 .0, _ => unreachable!(), - } + }; + log::info!("Created Adapter {:?}", id); + id } pub fn adapter_get_info( @@ -1055,6 +1057,7 @@ impl Global { Err(e) => break e, }; let (id, _) = fid.assign(device); + log::info!("Created Device {:?}", id); return (id.0, None); }; @@ -1090,6 +1093,7 @@ impl Global { Err(e) => break e, }; let (id, _) = fid.assign(device); + log::info!("Created Device {:?}", id); return (id.0, None); }; diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 6b5a0f0723..d89186fdbe 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -54,6 +54,7 @@ pub struct ShaderModule { impl Drop for ShaderModule { fn drop(&mut self) { + log::info!("Destroying ShaderModule {:?}", self.info.label()); if let Some(raw) = self.raw.take() { #[cfg(feature = "trace")] if let Some(ref mut trace) = *self.device.trace.lock() { @@ -241,6 +242,7 @@ pub struct ComputePipeline { impl Drop for ComputePipeline { fn drop(&mut self) { + log::info!("Destroying ComputePipeline {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -465,6 +467,7 @@ pub struct RenderPipeline { impl Drop for RenderPipeline { fn drop(&mut self) { + log::info!("Destroying RenderPipeline {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index b753c4dedd..d5b4929dec 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -221,6 +221,7 @@ impl Global { }; let (id, resource) = fid.assign(texture); + log::info!("Created CURRENT Texture {:?}", id); { // register it in the device tracker as uninitialized diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 4e221eb618..be54849340 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -75,6 +75,22 @@ impl ResourceInfo { } } + #[allow(unused_assignments)] + pub(crate) fn label(&self) -> String + where + Id: Debug, + { + let mut label = String::new(); + #[cfg(debug_assertions)] + { + label = self.label.clone(); + } + if let Some(id) = self.id.read().as_ref() { + label = format!("{:?}", id); + } + label + } + pub(crate) fn id(&self) -> Valid { self.id.read().unwrap() } @@ -352,6 +368,7 @@ pub struct Buffer { impl Drop for Buffer { fn drop(&mut self) { + log::info!("Destroying Buffer {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -479,6 +496,7 @@ pub struct Texture { impl Drop for Texture { fn drop(&mut self) { + log::info!("Destroying Texture {:?}", self.info.label()); use hal::Device; let mut clear_mode = self.clear_mode.write(); let clear_mode = &mut *clear_mode; @@ -768,6 +786,7 @@ pub struct TextureView { impl Drop for TextureView { fn drop(&mut self) { + log::info!("Destroying 
TextureView {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -879,6 +898,7 @@ pub struct Sampler { impl Drop for Sampler { fn drop(&mut self) { + log::info!("Destroying Sampler {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; @@ -965,6 +985,7 @@ pub struct QuerySet { impl Drop for QuerySet { fn drop(&mut self) { + log::info!("Destroying QuerySet {:?}", self.info.label()); if let Some(raw) = self.raw.take() { unsafe { use hal::Device; diff --git a/wgpu-hal/examples/halmark/main.rs b/wgpu-hal/examples/halmark/main.rs index 283eade0b8..f105b8b735 100644 --- a/wgpu-hal/examples/halmark/main.rs +++ b/wgpu-hal/examples/halmark/main.rs @@ -114,7 +114,7 @@ impl Example { }; let surface_caps = unsafe { adapter.surface_capabilities(&surface) }.ok_or(hal::InstanceError)?; - log::info!("Surface caps: {:#?}", surface_caps); + log::debug!("Surface caps: {:#?}", surface_caps); let hal::OpenDevice { device, queue } = unsafe { adapter @@ -723,7 +723,7 @@ impl Example { }; if do_fence { - log::info!("Context switch from {}", self.context_index); + log::debug!("Context switch from {}", self.context_index); let old_fence_value = ctx.fence_value; if self.contexts.len() == 1 { let hal_desc = hal::CommandEncoderDescriptor { diff --git a/wgpu-hal/src/auxil/dxgi/factory.rs b/wgpu-hal/src/auxil/dxgi/factory.rs index ec3c74c194..af7e79b6c4 100644 --- a/wgpu-hal/src/auxil/dxgi/factory.rs +++ b/wgpu-hal/src/auxil/dxgi/factory.rs @@ -65,7 +65,7 @@ pub fn enumerate_adapters(factory: d3d12::DxgiFactory) -> Vec { - log::info!("Failed casting Adapter1 to Adapter3: {}", err); + log::warn!("Failed casting Adapter1 to Adapter3: {}", err); } } } @@ -79,7 +79,7 @@ pub fn enumerate_adapters(factory: d3d12::DxgiFactory) -> Vec { - log::info!("Failed casting Adapter1 to Adapter2: {}", err); + log::warn!("Failed casting Adapter1 to Adapter2: {}", err); } } } @@ -141,7 +141,7 @@ pub fn create_factory( } // If we don't print it to info as all win7 will hit this case. Err(err) => { - log::info!("IDXGIFactory1 creation function not found: {:?}", err); + log::warn!("IDXGIFactory1 creation function not found: {:?}", err); None } }; @@ -163,7 +163,7 @@ pub fn create_factory( } // If we don't print it to info. Err(err) => { - log::info!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); + log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); return Ok((lib_dxgi, d3d12::DxgiFactory::Factory4(factory4))); } } @@ -201,7 +201,7 @@ pub fn create_factory( } // If we don't print it to info. 
Err(err) => { - log::info!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); + log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); } } diff --git a/wgpu-hal/src/dx11/library.rs b/wgpu-hal/src/dx11/library.rs index ea597abd56..b548155825 100644 --- a/wgpu-hal/src/dx11/library.rs +++ b/wgpu-hal/src/dx11/library.rs @@ -121,7 +121,7 @@ impl D3D11Lib { return Some((super::D3D11Device::Device2(device2), feature_level)); } Err(hr) => { - log::info!("Failed to cast device to ID3D11Device2: {}", hr) + log::warn!("Failed to cast device to ID3D11Device2: {}", hr) } } } @@ -134,7 +134,7 @@ impl D3D11Lib { return Some((super::D3D11Device::Device1(device1), feature_level)); } Err(hr) => { - log::info!("Failed to cast device to ID3D11Device1: {}", hr) + log::warn!("Failed to cast device to ID3D11Device1: {}", hr) } } } diff --git a/wgpu-hal/src/dx12/device.rs b/wgpu-hal/src/dx12/device.rs index 7e14818572..90ec0b71e8 100644 --- a/wgpu-hal/src/dx12/device.rs +++ b/wgpu-hal/src/dx12/device.rs @@ -183,7 +183,7 @@ impl super::Device { } let value = cur_value + 1; - log::info!("Waiting for idle with value {}", value); + log::debug!("Waiting for idle with value {}", value); self.present_queue.signal(self.idler.fence, value); let hr = self .idler diff --git a/wgpu-hal/src/dx12/instance.rs b/wgpu-hal/src/dx12/instance.rs index b6767c86bb..b60f26a7bf 100644 --- a/wgpu-hal/src/dx12/instance.rs +++ b/wgpu-hal/src/dx12/instance.rs @@ -50,7 +50,7 @@ impl crate::Instance for super::Instance { } }, Err(err) => { - log::info!("IDXGIFactory1 creation function not found: {:?}", err); + log::warn!("IDXGIFactory1 creation function not found: {:?}", err); None } }; diff --git a/wgpu-hal/src/gles/adapter.rs b/wgpu-hal/src/gles/adapter.rs index 379a89973e..91cc21e608 100644 --- a/wgpu-hal/src/gles/adapter.rs +++ b/wgpu-hal/src/gles/adapter.rs @@ -209,9 +209,9 @@ impl super::Adapter { (vendor, renderer) }; let version = unsafe { gl.get_parameter_string(glow::VERSION) }; - log::info!("Vendor: {}", vendor); - log::info!("Renderer: {}", renderer); - log::info!("Version: {}", version); + log::debug!("Vendor: {}", vendor); + log::debug!("Renderer: {}", renderer); + log::debug!("Version: {}", version); log::debug!("Extensions: {:#?}", extensions); @@ -230,7 +230,7 @@ impl super::Adapter { let shading_language_version = { let sl_version = unsafe { gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION) }; - log::info!("SL version: {}", &sl_version); + log::debug!("SL version: {}", &sl_version); let (sl_major, sl_minor) = Self::parse_version(&sl_version).ok()?; let value = sl_major as u16 * 100 + sl_minor as u16 * 10; naga::back::glsl::Version::Embedded { diff --git a/wgpu-hal/src/gles/device.rs b/wgpu-hal/src/gles/device.rs index c22570eb72..a818f942cc 100644 --- a/wgpu-hal/src/gles/device.rs +++ b/wgpu-hal/src/gles/device.rs @@ -170,7 +170,7 @@ impl super::Device { unsafe { gl.shader_source(raw, shader) }; unsafe { gl.compile_shader(raw) }; - log::info!("\tCompiled shader {:?}", raw); + log::debug!("\tCompiled shader {:?}", raw); let compiled_ok = unsafe { gl.get_shader_compile_status(raw) }; let msg = unsafe { gl.get_shader_info_log(raw) }; @@ -347,7 +347,7 @@ impl super::Device { // Create empty fragment shader if only vertex shader is present if has_stages == wgt::ShaderStages::VERTEX { let shader_src = format!("#version {glsl_version} es \n void main(void) {{}}",); - log::info!("Only vertex shader is present. Creating an empty fragment shader",); + log::warn!("Only vertex shader is present. 
Creating an empty fragment shader",); let shader = unsafe { Self::compile_shader( gl, @@ -368,7 +368,7 @@ impl super::Device { unsafe { gl.delete_shader(shader) }; } - log::info!("\tLinked program {:?}", program); + log::debug!("\tLinked program {:?}", program); let linked_ok = unsafe { gl.get_program_link_status(program) }; let msg = unsafe { gl.get_program_info_log(program) }; diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index e75d319239..0bd9b79e65 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -113,7 +113,7 @@ unsafe extern "system" fn egl_debug_proc( } fn open_x_display() -> Option<(ptr::NonNull, libloading::Library)> { - log::info!("Loading X11 library to get the current display"); + log::debug!("Loading X11 library to get the current display"); unsafe { let library = libloading::Library::new("libX11.so").ok()?; let func: libloading::Symbol = library.get(b"XOpenDisplay").unwrap(); @@ -136,7 +136,7 @@ fn test_wayland_display() -> Option { /* We try to connect and disconnect here to simply ensure there * is an active wayland display available. */ - log::info!("Loading Wayland library to get the current display"); + log::debug!("Loading Wayland library to get the current display"); let library = unsafe { let client_library = find_library(&["libwayland-client.so.0", "libwayland-client.so"])?; let wl_display_connect: libloading::Symbol = @@ -191,7 +191,7 @@ fn choose_config( let mut attributes = Vec::with_capacity(9); for tier_max in (0..tiers.len()).rev() { let name = tiers[tier_max].0; - log::info!("\tTrying {}", name); + log::debug!("\tTrying {}", name); attributes.clear(); for &(_, tier_attr) in tiers[..=tier_max].iter() { @@ -448,17 +448,17 @@ impl Inner { .query_string(Some(display), khronos_egl::EXTENSIONS) .unwrap() .to_string_lossy(); - log::info!("Display vendor {:?}, version {:?}", vendor, version,); + log::debug!("Display vendor {:?}, version {:?}", vendor, version,); log::debug!( "Display extensions: {:#?}", display_extensions.split_whitespace().collect::>() ); let srgb_kind = if version >= (1, 5) { - log::info!("\tEGL surface: +srgb"); + log::debug!("\tEGL surface: +srgb"); SrgbFrameBufferKind::Core } else if display_extensions.contains("EGL_KHR_gl_colorspace") { - log::info!("\tEGL surface: +srgb khr"); + log::debug!("\tEGL surface: +srgb khr"); SrgbFrameBufferKind::Khr } else { log::warn!("\tEGL surface: -srgb"); @@ -495,14 +495,14 @@ impl Inner { ]; if flags.contains(crate::InstanceFlags::DEBUG) { if version >= (1, 5) { - log::info!("\tEGL context: +debug"); + log::debug!("\tEGL context: +debug"); context_attributes.push(khronos_egl::CONTEXT_OPENGL_DEBUG); context_attributes.push(khronos_egl::TRUE as _); } else if supports_khr_context { - log::info!("\tEGL context: +debug KHR"); + log::debug!("\tEGL context: +debug KHR"); khr_context_flags |= EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR; } else { - log::info!("\tEGL context: -debug"); + log::debug!("\tEGL context: -debug"); } } if needs_robustness { @@ -510,11 +510,11 @@ impl Inner { // (regardless of whether the extension is supported!). // In fact, Angle does precisely that awful behavior, so we don't try it there. 
if version >= (1, 5) && !display_extensions.contains("EGL_ANGLE_") { - log::info!("\tEGL context: +robust access"); + log::debug!("\tEGL context: +robust access"); context_attributes.push(khronos_egl::CONTEXT_OPENGL_ROBUST_ACCESS); context_attributes.push(khronos_egl::TRUE as _); } else if display_extensions.contains("EGL_EXT_create_context_robustness") { - log::info!("\tEGL context: +robust access EXT"); + log::debug!("\tEGL context: +robust access EXT"); context_attributes.push(EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT); context_attributes.push(khronos_egl::TRUE as _); } else { @@ -544,7 +544,7 @@ impl Inner { || display_extensions.contains("EGL_KHR_surfaceless_context") || cfg!(target_os = "emscripten") { - log::info!("\tEGL context: +surfaceless"); + log::debug!("\tEGL context: +surfaceless"); None } else { let attributes = [ @@ -660,7 +660,7 @@ impl crate::Instance for Instance { let egl = match egl_result { Ok(egl) => Arc::new(egl), Err(e) => { - log::info!("Unable to open libEGL: {:?}", e); + log::warn!("Unable to open libEGL: {:?}", e); return Err(crate::InstanceError); } }; @@ -736,7 +736,7 @@ impl crate::Instance for Instance { .unwrap(); (display, Some(Arc::new(library)), WindowKind::AngleX11) } else if client_ext_str.contains("EGL_MESA_platform_surfaceless") { - log::info!("No windowing system present. Using surfaceless platform"); + log::warn!("No windowing system present. Using surfaceless platform"); let egl = egl1_5.expect("Failed to get EGL 1.5 for surfaceless"); let display = egl .get_platform_display( @@ -747,7 +747,7 @@ impl crate::Instance for Instance { .unwrap(); (display, None, WindowKind::Unknown) } else { - log::info!("EGL_MESA_platform_surfaceless not available. Using default platform"); + log::warn!("EGL_MESA_platform_surfaceless not available. 
Using default platform"); let display = egl.get_display(khronos_egl::DEFAULT_DISPLAY).unwrap(); (display, None, WindowKind::Unknown) }; @@ -755,7 +755,7 @@ impl crate::Instance for Instance { if desc.flags.contains(crate::InstanceFlags::VALIDATION) && client_ext_str.contains("EGL_KHR_debug") { - log::info!("Enabling EGL debug output"); + log::debug!("Enabling EGL debug output"); let function: EglDebugMessageControlFun = { let addr = egl.get_proc_address("eglDebugMessageControlKHR").unwrap(); unsafe { std::mem::transmute(addr) } @@ -903,13 +903,13 @@ impl crate::Instance for Instance { }; if self.flags.contains(crate::InstanceFlags::DEBUG) && gl.supports_debug() { - log::info!("Max label length: {}", unsafe { + log::debug!("Max label length: {}", unsafe { gl.get_parameter_i32(glow::MAX_LABEL_LENGTH) }); } if self.flags.contains(crate::InstanceFlags::VALIDATION) && gl.supports_debug() { - log::info!("Enabling GLES debug output"); + log::debug!("Enabling GLES debug output"); unsafe { gl.enable(glow::DEBUG_OUTPUT) }; unsafe { gl.debug_message_callback(gl_debug_message_callback) }; } diff --git a/wgpu-hal/src/metal/surface.rs b/wgpu-hal/src/metal/surface.rs index 896c631792..72bf09f207 100644 --- a/wgpu-hal/src/metal/surface.rs +++ b/wgpu-hal/src/metal/surface.rs @@ -175,7 +175,7 @@ impl crate::Surface for super::Surface { device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { - log::info!("build swapchain {:?}", config); + log::debug!("build swapchain {:?}", config); let caps = &device.shared.private_caps; *self.swapchain_format.write() = Some(config.format); diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index a9381d9ee9..80bada37b0 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ b/wgpu-hal/src/vulkan/instance.rs @@ -166,7 +166,7 @@ impl super::Instance { let instance_extensions = entry .enumerate_instance_extension_properties(None) .map_err(|e| { - log::info!("enumerate_instance_extension_properties: {:?}", e); + log::debug!("enumerate_instance_extension_properties: {:?}", e); crate::InstanceError })?; @@ -223,7 +223,7 @@ impl super::Instance { }) { true } else { - log::info!("Unable to find extension: {}", ext.to_string_lossy()); + log::warn!("Unable to find extension: {}", ext.to_string_lossy()); false } }); @@ -248,10 +248,10 @@ impl super::Instance { has_nv_optimus: bool, drop_guard: Option, ) -> Result { - log::info!("Instance version: 0x{:x}", driver_api_version); + log::debug!("Instance version: 0x{:x}", driver_api_version); let debug_utils = if extensions.contains(&ext::DebugUtils::name()) { - log::info!("Enabling debug utils"); + log::debug!("Enabling debug utils"); let extension = ext::DebugUtils::new(&entry, &raw_instance); // having ERROR unconditionally because Vk doesn't like empty flags let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR; @@ -285,7 +285,7 @@ impl super::Instance { let get_physical_device_properties = if extensions.contains(&khr::GetPhysicalDeviceProperties2::name()) { - log::info!("Enabling device properties2"); + log::debug!("Enabling device properties2"); Some(khr::GetPhysicalDeviceProperties2::new( &entry, &raw_instance, @@ -500,7 +500,7 @@ impl crate::Instance for super::Instance { let entry = match unsafe { ash::Entry::load() } { Ok(entry) => entry, Err(err) => { - log::info!("Missing Vulkan entry points: {:?}", err); + log::warn!("Missing Vulkan entry points: {:?}", err); return Err(crate::InstanceError); } }; @@ -540,7 +540,7 @@ impl crate::Instance for 
super::Instance { let extensions = Self::required_extensions(&entry, driver_api_version, desc.flags)?; let instance_layers = entry.enumerate_instance_layer_properties().map_err(|e| { - log::info!("enumerate_instance_layer_properties: {:?}", e); + log::debug!("enumerate_instance_layer_properties: {:?}", e); crate::InstanceError })?; diff --git a/wgpu/examples/hello-compute/main.rs b/wgpu/examples/hello-compute/main.rs index afdf7744c9..dc32686885 100644 --- a/wgpu/examples/hello-compute/main.rs +++ b/wgpu/examples/hello-compute/main.rs @@ -143,7 +143,7 @@ async fn execute_gpu_inner( encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size); // Submits command encoder for processing - queue.submit(Some(encoder.finish())); + let submission_index = queue.submit(Some(encoder.finish())); // Note that we're not calling `.await` here. let buffer_slice = staging_buffer.slice(..); @@ -154,7 +154,7 @@ async fn execute_gpu_inner( // Poll the device in a blocking manner so that our future resolves. // In an actual application, `device.poll(...)` should // be called in an event loop or on another thread. - device.poll(wgpu::Maintain::Wait); + device.poll(wgpu::Maintain::WaitForSubmissionIndex(submission_index)); // Awaits until `buffer_future` can be read from if let Some(Ok(())) = receiver.receive().await { From f2736e5aa002106b30446225e56ab44c8626b1a0 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Tue, 25 Apr 2023 19:50:38 +0200 Subject: [PATCH 032/132] Fix merge issue --- wgpu-core/src/device/resource.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 37a4d3c607..9aa6f9d271 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -654,7 +654,10 @@ impl Device { let missing_allowed_usages = desc.usage - format_features.allowed_usages; if !missing_allowed_usages.is_empty() { // detect downlevel incompatibilities - let wgpu_allowed_usages = desc.format.guaranteed_format_features().allowed_usages; + let wgpu_allowed_usages = desc + .format + .guaranteed_format_features(self.features) + .allowed_usages; let wgpu_missing_usages = desc.usage - wgpu_allowed_usages; return Err(CreateTextureError::InvalidFormatUsages( missing_allowed_usages, @@ -2989,7 +2992,7 @@ impl Device { if using_device_features || downlevel { Ok(adapter.get_texture_format_features(format)) } else { - Ok(format.guaranteed_format_features()) + Ok(format.guaranteed_format_features(self.features)) } } From 4529f73c9b99222b330634c6000c6274d6be2185 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Tue, 25 Apr 2023 20:08:49 +0200 Subject: [PATCH 033/132] Fix clippy wasm --- wgpu-hal/src/gles/web.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/wgpu-hal/src/gles/web.rs b/wgpu-hal/src/gles/web.rs index dcbb98d4e4..83c1871b02 100644 --- a/wgpu-hal/src/gles/web.rs +++ b/wgpu-hal/src/gles/web.rs @@ -174,6 +174,7 @@ pub struct Surface { impl Clone for Surface { fn clone(&self) -> Self { Self { + canvas: self.canvas.clone(), webgl2_context: self.webgl2_context.clone(), swapchain: RwLock::new(self.swapchain.read().clone()), texture: Mutex::new(*self.texture.lock()), From fbd23cc9ffe56bd8e5d28c90fcfa402e0b559801 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 29 Apr 2023 11:25:29 +0200 Subject: [PATCH 034/132] Fixing temp suspected clear order --- wgpu-core/src/device/queue.rs | 1 + wgpu-core/src/device/resource.rs | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git 
a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 96f52135b8..5aaf218cc0 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1095,6 +1095,7 @@ impl Global { .devices .get(queue_id) .map_err(|_| DeviceError::Invalid)?; + device.temp_suspected.lock().clear(); let mut fence = device.fence.write(); let fence = fence.as_mut().unwrap(); diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 9aa6f9d271..003ed2088b 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -274,7 +274,6 @@ impl Device { life_tracker .suspected_resources .extend(&self.temp_suspected.lock()); - self.temp_suspected.lock().clear(); life_tracker.triage_suspected( hub, From 2a3c78a329e2c8397fd08b2da99b730025cc654e Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 29 Apr 2023 12:28:47 +0200 Subject: [PATCH 035/132] Adding drop to staging buffer --- wgpu-core/src/command/render.rs | 2 +- wgpu-core/src/device/queue.rs | 17 ++++++++--------- wgpu-core/src/device/resource.rs | 2 +- wgpu-core/src/instance.rs | 2 +- wgpu-core/src/resource.rs | 15 ++++++++++++++- 5 files changed, 25 insertions(+), 13 deletions(-) diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 4f1deae37a..b9ec91dd86 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -532,7 +532,7 @@ pub enum RenderPassErrorInner { }, #[error("Surface texture is dropped before the render pass is finished")] SurfaceTextureDropped, - #[error("Not enough memory left")] + #[error("Not enough memory left for render pass")] OutOfMemory, #[error("Unable to clear non-present/read-only depth")] InvalidDepthOps, diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 5aaf218cc0..a7c082da24 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -260,7 +260,7 @@ impl PendingWrites { } fn prepare_staging_buffer( - device: &A::Device, + device: &Arc>, size: wgt::BufferAddress, ) -> Result<(StagingBuffer, *mut u8), DeviceError> { profiling::scope!("prepare_staging_buffer"); @@ -271,11 +271,12 @@ fn prepare_staging_buffer( memory_flags: hal::MemoryFlags::TRANSIENT, }; - let buffer = unsafe { device.create_buffer(&stage_desc)? }; - let mapping = unsafe { device.map_buffer(&buffer, 0..size) }?; + let buffer = unsafe { device.raw.as_ref().unwrap().create_buffer(&stage_desc)? }; + let mapping = unsafe { device.raw.as_ref().unwrap().map_buffer(&buffer, 0..size) }?; let staging_buffer = StagingBuffer { raw: Mutex::new(Some(buffer)), + device: device.clone(), size, info: ResourceInfo::new(""), is_coherent: mapping.is_coherent, @@ -375,8 +376,7 @@ impl Global { // Platform validation requires that the staging buffer always be // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
- let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(device.raw.as_ref().unwrap(), data_size)?; + let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(&device, data_size)?; if let Err(flush_error) = unsafe { profiling::scope!("copy"); @@ -423,7 +423,7 @@ impl Global { .map_err(|_| DeviceError::Invalid)?; let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(device.raw.as_ref().unwrap(), buffer_size.get())?; + prepare_staging_buffer(&device, buffer_size.get())?; let fid = hub.staging_buffers.prepare(id_in); let (id, _) = fid.assign(staging_buffer); @@ -793,8 +793,7 @@ impl Global { // Platform validation requires that the staging buffer always be // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. - let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(device.raw.as_ref().unwrap(), stage_size)?; + let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(&device, stage_size)?; if stage_bytes_per_row == bytes_per_row { profiling::scope!("copy aligned"); @@ -1095,10 +1094,10 @@ impl Global { .devices .get(queue_id) .map_err(|_| DeviceError::Invalid)?; - device.temp_suspected.lock().clear(); let mut fence = device.fence.write(); let fence = fence.as_mut().unwrap(); + device.temp_suspected.lock().clear(); let submit_index = device .active_submission_index diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 003ed2088b..106dd34c0a 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -124,7 +124,7 @@ impl Drop for Device { #[derive(Clone, Debug, Error)] pub enum CreateDeviceError { - #[error("Not enough memory left")] + #[error("Not enough memory left to create device")] OutOfMemory, #[error("Failed to create internal buffer for initializing textures")] FailedToCreateZeroBuffer(#[from] DeviceError), diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index f9a84f635c..20313798a7 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -406,7 +406,7 @@ pub enum RequestDeviceError { LimitsExceeded(#[from] FailedLimit), #[error("Device has no queue supporting graphics")] NoGraphicsQueue, - #[error("Not enough memory left")] + #[error("Not enough memory left to request device")] OutOfMemory, #[error("Unsupported features were requested: {0:?}")] UnsupportedFeature(wgt::Features), diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index be54849340..e0e54d2cca 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -427,11 +427,24 @@ impl Resource for Buffer { #[derive(Debug)] pub struct StagingBuffer { pub(crate) raw: Mutex>, + pub(crate) device: Arc>, pub(crate) size: wgt::BufferAddress, pub(crate) is_coherent: bool, pub(crate) info: ResourceInfo, } +impl Drop for StagingBuffer { + fn drop(&mut self) { + log::info!("Destroying StagingBuffer {:?}", self.info.label()); + if let Some(raw) = self.raw.lock().take() { + unsafe { + use hal::Device; + self.device.raw.as_ref().unwrap().destroy_buffer(raw); + } + } + } +} + impl Resource for StagingBuffer { const TYPE: &'static str = "StagingBuffer"; @@ -801,7 +814,7 @@ impl Drop for TextureView { pub enum CreateTextureViewError { #[error("Parent texture is invalid or destroyed")] InvalidTexture, - #[error("Not enough memory left")] + #[error("Not enough memory left to create texture view")] OutOfMemory, #[error("Invalid texture view dimension `{view:?}` with texture of dimension `{texture:?}`")] 
InvalidTextureViewDimension { From 011ed1b935dca01b2b72285fc7a6ba7af92997bd Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 29 Apr 2023 13:13:31 +0200 Subject: [PATCH 036/132] Better log info --- wgpu-core/src/command/mod.rs | 6 +++++- wgpu-core/src/device/life.rs | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 8619351c60..c4da10e52c 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -152,10 +152,10 @@ pub struct CommandBuffer { impl Drop for CommandBuffer { fn drop(&mut self) { - log::info!("Destroying CommandBuffer {:?}", self.info.label()); if self.data.lock().is_none() { return; } + log::info!("Destroying CommandBuffer {:?}", self.info.label()); let mut baked = self.extract_baked_commands(); unsafe { baked.encoder.reset_all(baked.list.into_iter()); @@ -293,6 +293,10 @@ impl CommandBuffer { } pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { + log::info!( + "Extracting BakedCommands from CommandBuffer {:?}", + self.info.label() + ); let data = self.data.lock().take().unwrap(); BakedCommands { encoder: data.encoder.raw, diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index d7af6923b2..7f7f4a84d5 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -402,7 +402,7 @@ impl LifetimeTracker { let mut active_index = self.active.iter().position(|a| a.index == last_done); while let Some(index) = active_index { let a = self.active.remove(index); - log::trace!("Active submission {} is done", a.index); + log::info!("Active submission {} is done", a.index); self.free_resources.extend(a.last_resources); self.ready_to_map.extend(a.mapped); for encoder in a.encoders { From 404e888dac2f0f1c0145c5ba01b1240672d1446f Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 30 Apr 2023 10:04:43 +0200 Subject: [PATCH 037/132] Active submission index incremented only at submit --- wgpu-core/src/device/global.rs | 2 +- wgpu-core/src/device/mod.rs | 2 +- wgpu-core/src/device/queue.rs | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 4ba6e3fc92..d8e49425d9 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2495,7 +2495,7 @@ impl Global { buffer.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed) + .load(Ordering::Relaxed) + 1, ); let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { src_offset: 0, dst_offset: 0, diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 0d9e8120d2..56789371b5 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -31,7 +31,7 @@ pub const SHADER_STAGE_COUNT: usize = 3; // value is enough for a 16k texture with float4 format.
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10; -const CLEANUP_WAIT_MS: u32 = 1000; +const CLEANUP_WAIT_MS: u32 = 5000; const IMPLICIT_FAILURE: &str = "failed implicit"; const EP_FAILURE: &str = "EP is invalid"; diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index a7c082da24..6e00fadaf0 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -564,7 +564,7 @@ impl Global { dst.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed) + .load(Ordering::Relaxed) + 1, ); @@ -775,7 +775,7 @@ impl Global { dst.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed) + .load(Ordering::Relaxed) + 1, ); @@ -1045,7 +1045,7 @@ impl Global { dst.info.use_at( device .active_submission_index - .fetch_add(1, Ordering::Relaxed) + .load(Ordering::Relaxed) + 1, ); From ddc015cf93f3c3d4afddea760ff7a3fc78ea8ed6 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sun, 30 Apr 2023 10:16:06 +0200 Subject: [PATCH 038/132] Fix fmt --- wgpu-core/src/device/global.rs | 9 +++------ wgpu-core/src/device/queue.rs | 24 ++++++------------------ 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index d8e49425d9..df2b7e3d40 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2492,12 +2492,9 @@ impl Global { let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; - buffer.info.use_at( - device - .active_submission_index - .load(Ordering::Relaxed) - + 1, - ); + buffer + .info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { src_offset: 0, dst_offset: 0, diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 6e00fadaf0..1f05466806 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -561,12 +561,8 @@ impl Global { let src_buffer_size = staging_buffer.size; self.queue_validate_write_buffer_impl(&dst, buffer_id, buffer_offset, src_buffer_size)?; - dst.info.use_at( - device - .active_submission_index - .load(Ordering::Relaxed) - + 1, - ); + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); let region = wgt::BufferSize::new(src_buffer_size).map(|size| hal::BufferCopy { src_offset: 0, @@ -772,12 +768,8 @@ impl Global { ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.info.use_at( - device - .active_submission_index - .load(Ordering::Relaxed) - + 1, - ); + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); let dst_raw = dst .inner @@ -1042,12 +1034,8 @@ impl Global { ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.info.use_at( - device - .active_submission_index - .load(Ordering::Relaxed) - + 1, - ); + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); let dst_raw = dst .inner From 7778e1abf5e8d37e635bc2817b6a0b8ac2283e70 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Thu, 4 May 2023 19:56:49 +0200 Subject: [PATCH 039/132] Drain submissions again --- wgpu-core/src/device/life.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 7f7f4a84d5..b4798e3bed 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -397,11 +397,14 @@ impl LifetimeTracker { //TODO: 
enable when `is_sorted_by_key` is stable //debug_assert!(self.active.is_sorted_by_key(|a| a.index)); + let done_count = self + .active + .iter() + .position(|a| a.index > last_done) + .unwrap_or(self.active.len()); + let mut work_done_closures = SmallVec::new(); - //TODO: Substitute with drain_filter when available - let mut active_index = self.active.iter().position(|a| a.index == last_done); - while let Some(index) = active_index { - let a = self.active.remove(index); + for a in self.active.drain(..done_count) { log::info!("Active submission {} is done", a.index); self.free_resources.extend(a.last_resources); self.ready_to_map.extend(a.mapped); @@ -410,7 +413,6 @@ impl LifetimeTracker { command_allocator.release_encoder(raw); } work_done_closures.extend(a.work_done_closures); - active_index = self.active.iter().position(|a| a.index == last_done); } work_done_closures } From 4640ec067ffbbb7db7f86aefd1ec62d858e72755 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 20 May 2023 09:37:29 +0200 Subject: [PATCH 040/132] Integrating fix multiview rendering in resource.rs --- wgpu-core/src/device/resource.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 106dd34c0a..096105fece 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -926,7 +926,10 @@ impl Device { break 'b Err(TextureViewNotRenderableReason::Usage(texture_desc.usage)); } - if resolved_dimension != TextureViewDimension::D2 { + if !(resolved_dimension == TextureViewDimension::D2 + || (self.features.contains(wgt::Features::MULTIVIEW) + && resolved_dimension == TextureViewDimension::D2Array)) + { break 'b Err(TextureViewNotRenderableReason::Dimension( resolved_dimension, )); @@ -938,7 +941,9 @@ impl Device { )); } - if resolved_array_layer_count != 1 { + if resolved_array_layer_count != 1 + && !(self.features.contains(wgt::Features::MULTIVIEW)) + { break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount( resolved_array_layer_count, )); From 15e3145bce48c94ded3ce2761fb30ebd6f8712d0 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 20 May 2023 10:15:59 +0200 Subject: [PATCH 041/132] Readding licenses and helper functions --- wgpu-core/LICENSE.APACHE | 1 + wgpu-core/LICENSE.MIT | 1 + wgpu-core/src/binding_model.rs | 26 ++++++---- wgpu-core/src/command/bundle.rs | 12 ++--- wgpu-core/src/command/compute.rs | 24 +++------ wgpu-core/src/command/mod.rs | 4 +- wgpu-core/src/command/query.rs | 19 +++---- wgpu-core/src/command/render.rs | 34 +++++-------- wgpu-core/src/device/global.rs | 67 ++++++++----------------- wgpu-core/src/device/mod.rs | 11 ++-- wgpu-core/src/device/queue.rs | 18 +++---- wgpu-core/src/device/resource.rs | 86 ++++++++++++-------------------- wgpu-core/src/hub.rs | 4 +- wgpu-core/src/pipeline.rs | 32 ++++++++---- wgpu-core/src/resource.rs | 40 ++++++++++++--- wgpu-hal/LICENSE.APACHE | 1 + wgpu-hal/LICENSE.MIT | 1 + wgpu-types/LICENSE.APACHE | 1 + wgpu-types/LICENSE.MIT | 1 + wgpu/LICENSE.APACHE | 1 + wgpu/LICENSE.MIT | 1 + 21 files changed, 176 insertions(+), 209 deletions(-) create mode 100644 wgpu-core/LICENSE.APACHE create mode 100644 wgpu-core/LICENSE.MIT create mode 100644 wgpu-hal/LICENSE.APACHE create mode 100644 wgpu-hal/LICENSE.MIT create mode 100644 wgpu-types/LICENSE.APACHE create mode 100644 wgpu-types/LICENSE.MIT create mode 100644 wgpu/LICENSE.APACHE create mode 100644 
wgpu/LICENSE.MIT diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE new file mode 100644 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-core/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-core/LICENSE.MIT b/wgpu-core/LICENSE.MIT new file mode 100644 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-core/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 2b3c72f2c0..934f4c5f9e 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -464,11 +464,7 @@ impl Drop for BindGroupLayout { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device - .raw - .as_ref() - .unwrap() - .destroy_bind_group_layout(raw); + self.device.raw().destroy_bind_group_layout(raw); } } } @@ -489,6 +485,12 @@ impl Resource for BindGroupLayout { } } +impl BindGroupLayout { + pub(crate) fn raw(&self) -> &A::BindGroupLayout { + self.raw.as_ref().unwrap() + } +} + #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum CreatePipelineLayoutError { @@ -600,17 +602,16 @@ impl Drop for PipelineLayout { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device - .raw - .as_ref() - .unwrap() - .destroy_pipeline_layout(raw); + self.device.raw().destroy_pipeline_layout(raw); } } } } impl PipelineLayout { + pub(crate) fn raw(&self) -> &A::PipelineLayout { + self.raw.as_ref().unwrap() + } /// Validate push constants match up with expected ranges. pub(crate) fn validate_push_constant_ranges( &self, @@ -818,13 +819,16 @@ impl Drop for BindGroup { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_bind_group(raw); + self.device.raw().destroy_bind_group(raw); } } } } impl BindGroup { + pub(crate) fn raw(&self) -> &A::BindGroup { + self.raw.as_ref().unwrap() + } pub(crate) fn validate_dynamic_bindings( &self, bind_group_index: u32, diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index d5a888ab34..5da02b8192 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -786,11 +786,9 @@ impl RenderBundle { unsafe { raw.set_bind_group( pipeline_layout_guard[pipeline_layout_id.unwrap()] - .raw - .as_ref() - .unwrap(), + .raw(), index, - bind_group.raw.as_ref().unwrap(), + bind_group.raw(), &offsets[..num_dynamic_offsets as usize], ) }; @@ -798,7 +796,7 @@ impl RenderBundle { } RenderCommand::SetPipeline(pipeline_id) => { let pipeline = pipeline_guard.get(pipeline_id).unwrap(); - unsafe { raw.set_render_pipeline(pipeline.raw.as_ref().unwrap()) }; + unsafe { raw.set_render_pipeline(pipeline.raw()) }; pipeline_layout_id = Some(pipeline.layout_id); } @@ -857,7 +855,7 @@ impl RenderBundle { unsafe { raw.set_push_constants( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), stages, offset, data_slice, @@ -870,7 +868,7 @@ impl RenderBundle { |clear_offset, clear_data| { unsafe { raw.set_push_constants( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), stages, clear_offset, clear_data, diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 274c7ffb2b..7f3540eafc 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -455,15 +455,10 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = pipeline_layout_guard[pipeline_layout_id.unwrap()] - .raw - .as_ref() - .unwrap(); + let 
pipeline_layout = + pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] - .raw - .as_ref() - .unwrap(); + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( pipeline_layout, @@ -487,7 +482,7 @@ impl Global { .map_pass_err(scope)?; unsafe { - raw.set_compute_pipeline(pipeline.raw.as_ref().unwrap()); + raw.set_compute_pipeline(pipeline.raw()); } // Rebind resources @@ -501,13 +496,10 @@ impl Global { ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] - .raw - .as_ref() - .unwrap(); + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -528,7 +520,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), wgt::ShaderStages::COMPUTE, clear_offset, clear_data, @@ -571,7 +563,7 @@ impl Global { unsafe { raw.set_push_constants( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), wgt::ShaderStages::COMPUTE, offset, data_slice, diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index c4da10e52c..eeb3084bff 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -163,9 +163,7 @@ impl Drop for CommandBuffer { unsafe { use hal::Device; self.device - .raw - .as_ref() - .unwrap() + .raw() .destroy_command_encoder(baked.encoder); } } diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index b2dcfb180d..3834e39775 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -68,10 +68,7 @@ impl QueryResetMap { // We've hit the end of a run, dispatch a reset (Some(start), false) => { run_start = None; - unsafe { - raw_encoder - .reset_queries(query_set.raw.as_ref().unwrap(), start..idx as u32) - }; + unsafe { raw_encoder.reset_queries(query_set.raw(), start..idx as u32) }; } // We're starting a run (None, true) => { @@ -213,7 +210,7 @@ impl QuerySet { }); } - Ok(self.raw.as_ref().unwrap()) + Ok(self.raw()) } pub(super) fn validate_and_write_timestamp( @@ -234,8 +231,7 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. if needs_reset { - raw_encoder - .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); + raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1)); } raw_encoder.write_timestamp(query_set, query_index); } @@ -269,8 +265,7 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. 
if needs_reset { - raw_encoder - .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); + raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1)); } raw_encoder.begin_query(query_set, query_index); } @@ -288,7 +283,7 @@ pub(super) fn end_pipeline_statistics_query( // We can unwrap here as the validity was validated when the active query was set let query_set = storage.get(query_set_id).unwrap(); - unsafe { raw_encoder.end_query(query_set.raw.as_ref().unwrap(), query_index) }; + unsafe { raw_encoder.end_query(query_set.raw(), query_index) }; Ok(()) } else { @@ -424,9 +419,9 @@ impl Global { unsafe { raw_encoder.transition_buffers(dst_barrier.into_iter()); raw_encoder.copy_query_results( - query_set.raw.as_ref().unwrap(), + query_set.raw(), start_query..end_query, - dst_buffer.raw.as_ref().unwrap(), + dst_buffer.raw(), destination_offset, wgt::BufferSize::new_unchecked(stride as u64), ); diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index d589cc9832..75646ad9fb 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -929,7 +929,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil = Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: view.raw.as_ref().unwrap(), + view: view.raw(), usage, }, depth_ops: at.depth.hal_ops(), @@ -1040,14 +1040,14 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { .push(resolve_view.to_render_attachment(hal::TextureUses::COLOR_TARGET)); hal_resolve_target = Some(hal::Attachment { - view: resolve_view.raw.as_ref().unwrap(), + view: resolve_view.raw(), usage: hal::TextureUses::COLOR_TARGET, }); } colors.push(Some(hal::ColorAttachment { target: hal::Attachment { - view: color_view.raw.as_ref().unwrap(), + view: color_view.raw(), usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: hal_resolve_target, @@ -1168,7 +1168,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { color_attachments: &[], depth_stencil_attachment: Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: view.raw.as_ref().unwrap(), + view: view.raw(), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops, @@ -1363,16 +1363,10 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = pipeline_layout_guard - [pipeline_layout_id.unwrap()] - .raw - .as_ref() - .unwrap(); + let pipeline_layout = + pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] - .raw - .as_ref() - .unwrap(); + let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( pipeline_layout, @@ -1418,7 +1412,7 @@ impl Global { .require(pipeline.flags.contains(PipelineFlags::BLEND_CONSTANT)); unsafe { - raw.set_render_pipeline(pipeline.raw.as_ref().unwrap()); + raw.set_render_pipeline(pipeline.raw()); } if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) { @@ -1438,13 +1432,11 @@ impl Global { ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()] - .raw - .as_ref() - .unwrap(); + let raw_bg = + bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -1465,7 +1457,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - 
pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), range.stages, clear_offset, clear_data, @@ -1678,7 +1670,7 @@ impl Global { unsafe { raw.set_push_constants( - pipeline_layout.raw.as_ref().unwrap(), + pipeline_layout.raw(), stages, offset, data_slice, diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index df2b7e3d40..3d18d4822c 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -163,13 +163,7 @@ impl Global { let ptr = if map_size == 0 { std::ptr::NonNull::dangling() } else { - match map_buffer( - device.raw.as_ref().unwrap(), - &buffer, - 0, - map_size, - HostMap::Write, - ) { + match map_buffer(device.raw(), &buffer, 0, map_size, HostMap::Write) { Ok(ptr) => ptr, Err(e) => { device.lock_life().schedule_resource_destruction( @@ -206,13 +200,7 @@ impl Global { break e; } }; - let mapping = match unsafe { - device - .raw - .as_ref() - .unwrap() - .map_buffer(stage.raw.as_ref().unwrap(), 0..stage.size) - } { + let mapping = match unsafe { device.raw().map_buffer(stage.raw(), 0..stage.size) } { Ok(mapping) => mapping, Err(e) => { let mut life_lock = device.lock_life(); @@ -360,26 +348,20 @@ impl Global { }); } - let raw_buf = buffer.raw.as_ref().unwrap(); + let raw_buf = buffer.raw(); unsafe { let mapping = device - .raw - .as_ref() - .unwrap() + .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) .map_err(DeviceError::from)?; ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); if !mapping.is_coherent { device - .raw - .as_ref() - .unwrap() + .raw() .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64)); } device - .raw - .as_ref() - .unwrap() + .raw() .unmap_buffer(raw_buf) .map_err(DeviceError::from)?; } @@ -410,25 +392,21 @@ impl Global { check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?; //assert!(buffer isn't used by the GPU); - let raw_buf = buffer.raw.as_ref().unwrap(); + let raw_buf = buffer.raw(); unsafe { let mapping = device - .raw - .as_ref() - .unwrap() + .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) .map_err(DeviceError::from)?; if !mapping.is_coherent { - device.raw.as_ref().unwrap().invalidate_mapped_ranges( + device.raw().invalidate_mapped_ranges( raw_buf, iter::once(offset..offset + data.len() as u64), ); } ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len()); device - .raw - .as_ref() - .unwrap() + .raw() .unmap_buffer(raw_buf) .map_err(DeviceError::from)?; } @@ -1379,7 +1357,7 @@ impl Global { .lock() .as_mut() .unwrap() - .acquire_encoder(device.raw.as_ref().unwrap(), device.queue.as_ref().unwrap()) + .acquire_encoder(device.raw(), device.queue.as_ref().unwrap()) { Ok(raw) => raw, Err(_) => break DeviceError::OutOfMemory, @@ -2048,7 +2026,7 @@ impl Global { A::get_surface(surface) .unwrap() .raw - .configure(device.raw.as_ref().unwrap(), &hal_config) + .configure(device.raw(), &hal_config) } { Ok(()) => (), Err(error) => { @@ -2225,14 +2203,14 @@ impl Global { pub fn device_start_capture(&self, id: DeviceId) { let hub = A::hub(self); if let Ok(device) = hub.devices.get(id) { - unsafe { device.raw.as_ref().unwrap().start_capture() }; + unsafe { device.raw().start_capture() }; } } pub fn device_stop_capture(&self, id: DeviceId) { let hub = A::hub(self); if let Ok(device) = hub.devices.get(id) { - unsafe { device.raw.as_ref().unwrap().stop_capture() }; + unsafe { device.raw().stop_capture() }; } } @@ -2483,10 +2461,9 @@ impl Global { let _ = ptr; if needs_flush { unsafe { - 
device.raw.as_ref().unwrap().flush_mapped_ranges( - stage_buffer.raw.as_ref().unwrap(), - iter::once(0..buffer.size), - ); + device + .raw() + .flush_mapped_ranges(stage_buffer.raw(), iter::once(0..buffer.size)); } } @@ -2501,7 +2478,7 @@ impl Global { size, }); let transition_src = hal::BufferBarrier { - buffer: stage_buffer.raw.as_ref().unwrap(), + buffer: stage_buffer.raw(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }; let transition_dst = hal::BufferBarrier { @@ -2517,7 +2494,7 @@ impl Global { ); if buffer.size > 0 { encoder.copy_buffer_to_buffer( - stage_buffer.raw.as_ref().unwrap(), + stage_buffer.raw(), raw_buf, region.into_iter(), ); @@ -2551,10 +2528,8 @@ impl Global { } unsafe { device - .raw - .as_ref() - .unwrap() - .unmap_buffer(buffer.raw.as_ref().unwrap()) + .raw() + .unmap_buffer(buffer.raw()) .map_err(DeviceError::from)? }; } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 56789371b5..5d679f8ed8 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -198,16 +198,13 @@ fn map_buffer( kind: HostMap, ) -> Result, BufferAccessError> { let mapping = unsafe { - raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size) + raw.map_buffer(buffer.raw(), offset..offset + size) .map_err(DeviceError::from)? }; *buffer.sync_mapped_writes.lock() = match kind { HostMap::Read if !mapping.is_coherent => unsafe { - raw.invalidate_mapped_ranges( - buffer.raw.as_ref().unwrap(), - iter::once(offset..offset + size), - ); + raw.invalidate_mapped_ranges(buffer.raw(), iter::once(offset..offset + size)); None }, HostMap::Write if !mapping.is_coherent => Some(offset..offset + size), @@ -247,9 +244,7 @@ fn map_buffer( mapped[fill_range].fill(0); if zero_init_needs_flush_now { - unsafe { - raw.flush_mapped_ranges(buffer.raw.as_ref().unwrap(), iter::once(uninitialized)) - }; + unsafe { raw.flush_mapped_ranges(buffer.raw(), iter::once(uninitialized)) }; } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 1f05466806..df2d1cda55 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -271,8 +271,8 @@ fn prepare_staging_buffer( memory_flags: hal::MemoryFlags::TRANSIENT, }; - let buffer = unsafe { device.raw.as_ref().unwrap().create_buffer(&stage_desc)? }; - let mapping = unsafe { device.raw.as_ref().unwrap().map_buffer(&buffer, 0..size) }?; + let buffer = unsafe { device.raw().create_buffer(&stage_desc)? }; + let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size) }?; let staging_buffer = StagingBuffer { raw: Mutex::new(Some(buffer)), @@ -381,7 +381,7 @@ impl Global { if let Err(flush_error) = unsafe { profiling::scope!("copy"); ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); - staging_buffer.flush(device.raw.as_ref().unwrap()) + staging_buffer.flush(device.raw()) } { device .pending_writes @@ -459,7 +459,7 @@ impl Global { // user. Platform validation requires that the staging buffer always // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
- if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { + if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } { device .pending_writes .write() @@ -820,7 +820,7 @@ impl Global { } } - if let Err(e) = unsafe { staging_buffer.flush(device.raw.as_ref().unwrap()) } { + if let Err(e) = unsafe { staging_buffer.flush(device.raw()) } { pending_writes.consume(&device, Arc::new(staging_buffer)); return Err(e.into()); } @@ -1163,10 +1163,8 @@ impl Global { if let BufferMapState::Active { .. } = *buffer.map_state.lock() { log::warn!("Dropped buffer has a pending mapping."); - unsafe { - device.raw.as_ref().unwrap().unmap_buffer(raw_buf) - } - .map_err(DeviceError::from)?; + unsafe { device.raw().unmap_buffer(raw_buf) } + .map_err(DeviceError::from)?; } device.temp_suspected.lock().buffers.push(buffer.clone()); } else { @@ -1442,7 +1440,7 @@ impl Global { if let Some(pending_execution) = device.pending_writes.write().as_mut().unwrap().post_submit( device.command_allocator.lock().as_mut().unwrap(), - device.raw.as_ref().unwrap(), + device.raw(), device.queue.as_ref().unwrap(), ) { diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 096105fece..49866b2ac6 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -56,21 +56,17 @@ use super::{ /// stored behind mutexes. /// /// TODO: establish clear order of locking for these: -/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`, -/// `render_passes`, `pending_writes`, `trace`. +/// `life_tracker`, `trackers`, `render_passes`, `pending_writes`, `trace`. /// /// Currently, the rules are: /// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system /// 1. `self.trackers` is locked last (unenforced) /// 1. `self.trace` is locked last (unenforced) pub struct Device { - pub(crate) raw: Option, + raw: Option, pub(crate) adapter_id: id::Valid, pub(crate) queue: Option, pub(crate) zero_buffer: Option, - //pub(crate) cmd_allocator: command::CommandAllocator, - //mem_allocator: Mutex>, - //desc_allocator: Mutex>, //Note: The submission index here corresponds to the last submission that is done. pub(crate) info: ResourceInfo, @@ -131,6 +127,9 @@ pub enum CreateDeviceError { } impl Device { + pub(crate) fn raw(&self) -> &A::Device { + self.raw.as_ref().unwrap() + } pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> { if self.features.contains(feature) { Ok(()) @@ -316,8 +315,7 @@ impl Device { last_done_index, self.command_allocator.lock().as_mut().unwrap(), ); - let mapping_closures = - life_tracker.handle_mapping(hub, self.raw.as_ref().unwrap(), &self.trackers); + let mapping_closures = life_tracker.handle_mapping(hub, self.raw(), &self.trackers); life_tracker.cleanup(); let closures = UserClosures { @@ -328,69 +326,56 @@ impl Device { } pub(crate) fn untrack(&self, trackers: &Tracker) { - self.temp_suspected.lock().clear(); + let mut temp_suspected = self.temp_suspected.lock(); + temp_suspected.clear(); // As the tracker is cleared/dropped, we need to consider all the resources // that it references for destruction in the next GC pass. 
{ for resource in trackers.buffers.used_resources() { if resource.is_unique() { - self.temp_suspected.lock().buffers.push(resource.clone()); + temp_suspected.buffers.push(resource.clone()); } } for resource in trackers.textures.used_resources() { if resource.is_unique() { - self.temp_suspected.lock().textures.push(resource.clone()); + temp_suspected.textures.push(resource.clone()); } } for resource in trackers.views.used_resources() { if resource.is_unique() { - self.temp_suspected - .lock() - .texture_views - .push(resource.clone()); + temp_suspected.texture_views.push(resource.clone()); } } for resource in trackers.bind_groups.used_resources() { if resource.is_unique() { - self.temp_suspected - .lock() - .bind_groups - .push(resource.clone()); + temp_suspected.bind_groups.push(resource.clone()); } } for resource in trackers.samplers.used_resources() { if resource.is_unique() { - self.temp_suspected.lock().samplers.push(resource.clone()); + temp_suspected.samplers.push(resource.clone()); } } for resource in trackers.compute_pipelines.used_resources() { if resource.is_unique() { - self.temp_suspected - .lock() - .compute_pipelines - .push(resource.clone()); + temp_suspected.compute_pipelines.push(resource.clone()); } } for resource in trackers.render_pipelines.used_resources() { if resource.is_unique() { - self.temp_suspected - .lock() - .render_pipelines - .push(resource.clone()); + temp_suspected.render_pipelines.push(resource.clone()); } } for resource in trackers.query_sets.used_resources() { if resource.is_unique() { - self.temp_suspected.lock().query_sets.push(resource.clone()); + temp_suspected.query_sets.push(resource.clone()); } } } - self.lock_life() - .suspected_resources - .extend(&self.temp_suspected.lock()); + self.lock_life().suspected_resources.extend(&temp_suspected); - self.temp_suspected.lock().clear(); + temp_suspected.clear(); } pub(crate) fn create_buffer( @@ -478,8 +463,7 @@ impl Device { usage, memory_flags, }; - let buffer = unsafe { self.raw.as_ref().unwrap().create_buffer(&hal_desc) } - .map_err(DeviceError::from)?; + let buffer = unsafe { self.raw().create_buffer(&hal_desc) }.map_err(DeviceError::from)?; Ok(Buffer { raw: Some(buffer), @@ -1897,7 +1881,7 @@ impl Device { } let res_index = hal_samplers.len(); - hal_samplers.push(&sampler.raw); + hal_samplers.push(sampler.raw()); (res_index, 1) } _ => { @@ -1919,7 +1903,7 @@ impl Device { .samplers .add_single(&*sampler_guard, id) .ok_or(Error::InvalidSampler(id))?; - hal_samplers.push(&sampler.raw); + hal_samplers.push(sampler.raw()); } (res_index, num_bindings) @@ -1945,7 +1929,7 @@ impl Device { )?; let res_index = hal_textures.len(); hal_textures.push(hal::TextureBinding { - view: view.raw.as_ref().unwrap(), + view: view.raw(), usage: internal_use, }); (res_index, 1) @@ -1972,7 +1956,7 @@ impl Device { &mut used_texture_ranges, )?; hal_textures.push(hal::TextureBinding { - view: view.raw.as_ref().unwrap(), + view: view.raw(), usage: internal_use, }); } @@ -1996,16 +1980,12 @@ impl Device { return Err(Error::DuplicateBinding(a.binding)); } } - let samplers = hal_samplers - .iter() - .map(|&s| s.as_ref().unwrap()) - .collect::>(); let hal_desc = hal::BindGroupDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap(), + layout: layout.raw(), entries: &hal_entries, buffers: &hal_buffers, - samplers: samplers.as_ref(), + samplers: &hal_samplers, textures: &hal_textures, }; let raw = unsafe { @@ -2267,7 +2247,7 @@ impl Device { let bgl_vec = desc .bind_group_layouts .iter() - .map(|&id| 
bgl_guard.get(id).unwrap().raw.as_ref().unwrap()) + .map(|&id| bgl_guard.get(id).unwrap().raw()) .collect::>(); let hal_desc = hal::PipelineLayoutDescriptor { label: desc.label.borrow_option(), @@ -2425,10 +2405,10 @@ impl Device { let pipeline_desc = hal::ComputePipelineDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap(), + layout: layout.raw(), stage: hal::ProgrammableStage { entry_point: desc.stage.entry_point.as_ref(), - module: shader_module.raw.as_ref().unwrap(), + module: shader_module.raw(), }, }; @@ -2770,7 +2750,7 @@ impl Device { } hal::ProgrammableStage { - module: shader_module.raw.as_ref().unwrap(), + module: shader_module.raw(), entry_point: stage.entry_point.as_ref(), } }; @@ -2819,7 +2799,7 @@ impl Device { } Some(hal::ProgrammableStage { - module: shader_module.raw.as_ref().unwrap(), + module: shader_module.raw(), entry_point: fragment.stage.entry_point.as_ref(), }) } @@ -2901,7 +2881,7 @@ impl Device { let pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.borrow_option(), - layout: layout.raw.as_ref().unwrap(), + layout: layout.raw(), vertex_buffers: &vertex_buffers, vertex_stage, primitive: desc.primitive, @@ -3064,9 +3044,7 @@ impl Device { let hal_desc = desc.map_label(crate::LabelHelpers::borrow_option); Ok(QuerySet { raw: Some(unsafe { - self.raw - .as_ref() - .unwrap() + self.raw() .create_query_set(&hal_desc) .unwrap() }), diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 7cfce75cad..a7a5e7bcba 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -301,7 +301,7 @@ impl Hub { let device = &devices[present.device_id]; let suf = A::get_surface(surface); unsafe { - suf.unwrap().raw.unconfigure(device.raw.as_ref().unwrap()); + suf.unwrap().raw.unconfigure(device.raw()); //TODO: we could destroy the surface here } } @@ -324,7 +324,7 @@ impl Hub { let device = self.devices.get(device_id.0).unwrap(); unsafe { use hal::Surface; - surface.raw.unconfigure(device.raw.as_ref().unwrap()); + surface.raw.unconfigure(device.raw()); } } diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index d89186fdbe..8a8b4a60bc 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -62,7 +62,7 @@ impl Drop for ShaderModule { } unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_shader_module(raw); + self.device.raw().destroy_shader_module(raw); } } } @@ -83,6 +83,12 @@ impl Resource for ShaderModule { } } +impl ShaderModule { + pub(crate) fn raw(&self) -> &A::ShaderModule { + self.raw.as_ref().unwrap() + } +} + #[derive(Clone, Debug)] pub struct ShaderError { pub source: String, @@ -246,11 +252,7 @@ impl Drop for ComputePipeline { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device - .raw - .as_ref() - .unwrap() - .destroy_compute_pipeline(raw); + self.device.raw().destroy_compute_pipeline(raw); } } } @@ -264,6 +266,12 @@ impl Resource for ComputePipeline { } } +impl ComputePipeline { + pub(crate) fn raw(&self) -> &A::ComputePipeline { + self.raw.as_ref().unwrap() + } +} + /// Describes how the vertex buffer is interpreted. 
#[derive(Clone, Debug)] #[cfg_attr(feature = "trace", derive(serde::Serialize))] @@ -471,11 +479,7 @@ impl Drop for RenderPipeline { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device - .raw - .as_ref() - .unwrap() - .destroy_render_pipeline(raw); + self.device.raw().destroy_render_pipeline(raw); } } } @@ -488,3 +492,9 @@ impl Resource for RenderPipeline { &self.info } } + +impl RenderPipeline { + pub(crate) fn raw(&self) -> &A::RenderPipeline { + self.raw.as_ref().unwrap() + } +} diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index dcd677841e..0551fc5910 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -372,12 +372,18 @@ impl Drop for Buffer { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_buffer(raw); + self.device.raw().destroy_buffer(raw); } } } } +impl Buffer { + pub(crate) fn raw(&self) -> &A::Buffer { + self.raw.as_ref().unwrap() + } +} + #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum CreateBufferError { @@ -439,7 +445,7 @@ impl Drop for StagingBuffer { if let Some(raw) = self.raw.lock().take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_buffer(raw); + self.device.raw().destroy_buffer(raw); } } } @@ -526,7 +532,7 @@ impl Drop for Texture { let inner = self.inner.take().unwrap(); if let TextureInner::Native { raw: Some(raw) } = inner { unsafe { - self.device.raw.as_ref().unwrap().destroy_texture(raw); + self.device.raw().destroy_texture(raw); } } } @@ -556,7 +562,7 @@ impl Texture { } else { mip_level * desc.size.depth_or_array_layers } + depth_or_layer; - clear_views[index as usize].raw.as_ref().unwrap() + clear_views[index as usize].raw() } } } @@ -611,7 +617,7 @@ impl Global { let hub = A::hub(self); let device = hub.devices.try_get(id).ok().flatten(); - let hal_device = device.as_ref().map(|device| device.raw.as_ref().unwrap()); + let hal_device = device.as_ref().map(|device| device.raw()); hal_device_callback(hal_device) } @@ -803,12 +809,18 @@ impl Drop for TextureView { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_texture_view(raw); + self.device.raw().destroy_texture_view(raw); } } } } +impl TextureView { + pub(crate) fn raw(&self) -> &A::TextureView { + self.raw.as_ref().unwrap() + } +} + #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum CreateTextureViewError { @@ -915,12 +927,18 @@ impl Drop for Sampler { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_sampler(raw); + self.device.raw().destroy_sampler(raw); } } } } +impl Sampler { + pub(crate) fn raw(&self) -> &A::Sampler { + self.raw.as_ref().unwrap() + } +} + #[derive(Copy, Clone)] pub enum SamplerFilterErrorType { MagFilter, @@ -1002,7 +1020,7 @@ impl Drop for QuerySet { if let Some(raw) = self.raw.take() { unsafe { use hal::Device; - self.device.raw.as_ref().unwrap().destroy_query_set(raw); + self.device.raw().destroy_query_set(raw); } } } @@ -1016,6 +1034,12 @@ impl Resource for QuerySet { } } +impl QuerySet { + pub(crate) fn raw(&self) -> &A::QuerySet { + self.raw.as_ref().unwrap() + } +} + #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum DestroyError { diff --git a/wgpu-hal/LICENSE.APACHE b/wgpu-hal/LICENSE.APACHE new file mode 100644 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-hal/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-hal/LICENSE.MIT 
b/wgpu-hal/LICENSE.MIT new file mode 100644 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-hal/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-types/LICENSE.APACHE b/wgpu-types/LICENSE.APACHE new file mode 100644 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-types/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-types/LICENSE.MIT b/wgpu-types/LICENSE.MIT new file mode 100644 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-types/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu/LICENSE.APACHE b/wgpu/LICENSE.APACHE new file mode 100644 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu/LICENSE.MIT b/wgpu/LICENSE.MIT new file mode 100644 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file From a0af81ae50b62e4193c7048c1cfae74b4b1d5e91 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 20 May 2023 10:18:51 +0200 Subject: [PATCH 042/132] Fixing format --- wgpu-core/src/command/bundle.rs | 3 +-- wgpu-core/src/command/mod.rs | 4 +--- wgpu-core/src/device/resource.rs | 6 +----- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 5da02b8192..6dcfa0d758 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -785,8 +785,7 @@ impl RenderBundle { let bind_group = bind_group_guard.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( - pipeline_layout_guard[pipeline_layout_id.unwrap()] - .raw(), + pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(), index, bind_group.raw(), &offsets[..num_dynamic_offsets as usize], diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index eeb3084bff..8247de0ffe 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -162,9 +162,7 @@ impl Drop for CommandBuffer { } unsafe { use hal::Device; - self.device - .raw() - .destroy_command_encoder(baked.encoder); + self.device.raw().destroy_command_encoder(baked.encoder); } } } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 49866b2ac6..4f6c07c4c4 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -3043,11 +3043,7 @@ impl Device { let hal_desc = desc.map_label(crate::LabelHelpers::borrow_option); Ok(QuerySet { - raw: Some(unsafe { - self.raw() - .create_query_set(&hal_desc) - .unwrap() - }), + raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }), device: self.clone(), info: ResourceInfo::new(""), desc: desc.map_label(|_| ()), From 106b51bcfe221e32d61b7e4229fac4e8b5e5c733 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Tue, 23 May 2023 21:29:30 +0200 Subject: [PATCH 043/132] Fixing wrong merge of create_render_bundle_error --- wgpu-core/src/device/global.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 3d18d4822c..b9c4ad7322 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -282,6 +282,17 @@ impl Global { fid.assign_error(label.borrow_or_default()); } + pub fn create_render_bundle_error( + &self, + id_in: Input, + label: Label, + ) { + let hub = 
A::hub(self); + let fid = hub.render_bundles.prepare(id_in); + + fid.assign_error(label.borrow_or_default()); + } + /// Assign `id_in` an error with the given `label`. /// /// See `create_buffer_error` for more context and explaination. From 07c14e60258395adea37dbe8d8afe6d3656e6293 Mon Sep 17 00:00:00 2001 From: Jim Blandy Date: Mon, 22 May 2023 13:23:06 -0700 Subject: [PATCH 044/132] Use symlinks for wgpu{,-core,-hal,-types}/LICENSE.{APACHE,MIT}. --- wgpu-core/LICENSE.APACHE | 0 wgpu-core/LICENSE.MIT | 0 wgpu-hal/LICENSE.APACHE | 0 wgpu-hal/LICENSE.MIT | 0 wgpu-types/LICENSE.APACHE | 0 wgpu-types/LICENSE.MIT | 0 wgpu/LICENSE.APACHE | 0 wgpu/LICENSE.MIT | 0 8 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 120000 wgpu-core/LICENSE.APACHE mode change 100644 => 120000 wgpu-core/LICENSE.MIT mode change 100644 => 120000 wgpu-hal/LICENSE.APACHE mode change 100644 => 120000 wgpu-hal/LICENSE.MIT mode change 100644 => 120000 wgpu-types/LICENSE.APACHE mode change 100644 => 120000 wgpu-types/LICENSE.MIT mode change 100644 => 120000 wgpu/LICENSE.APACHE mode change 100644 => 120000 wgpu/LICENSE.MIT diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE deleted file mode 100644 index 7141cad5b2..0000000000 --- a/wgpu-core/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-core/LICENSE.APACHE b/wgpu-core/LICENSE.APACHE new file mode 120000 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-core/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-core/LICENSE.MIT b/wgpu-core/LICENSE.MIT deleted file mode 100644 index 6b8772d1a7..0000000000 --- a/wgpu-core/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-core/LICENSE.MIT b/wgpu-core/LICENSE.MIT new file mode 120000 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-core/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-hal/LICENSE.APACHE b/wgpu-hal/LICENSE.APACHE deleted file mode 100644 index 7141cad5b2..0000000000 --- a/wgpu-hal/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-hal/LICENSE.APACHE b/wgpu-hal/LICENSE.APACHE new file mode 120000 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-hal/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-hal/LICENSE.MIT b/wgpu-hal/LICENSE.MIT deleted file mode 100644 index 6b8772d1a7..0000000000 --- a/wgpu-hal/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-hal/LICENSE.MIT b/wgpu-hal/LICENSE.MIT new file mode 120000 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-hal/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-types/LICENSE.APACHE b/wgpu-types/LICENSE.APACHE deleted file mode 100644 index 7141cad5b2..0000000000 --- a/wgpu-types/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-types/LICENSE.APACHE b/wgpu-types/LICENSE.APACHE new file mode 120000 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu-types/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu-types/LICENSE.MIT b/wgpu-types/LICENSE.MIT deleted file mode 100644 index 6b8772d1a7..0000000000 --- a/wgpu-types/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu-types/LICENSE.MIT 
b/wgpu-types/LICENSE.MIT new file mode 120000 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu-types/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file diff --git a/wgpu/LICENSE.APACHE b/wgpu/LICENSE.APACHE deleted file mode 100644 index 7141cad5b2..0000000000 --- a/wgpu/LICENSE.APACHE +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu/LICENSE.APACHE b/wgpu/LICENSE.APACHE new file mode 120000 index 0000000000..7141cad5b2 --- /dev/null +++ b/wgpu/LICENSE.APACHE @@ -0,0 +1 @@ +../LICENSE.APACHE \ No newline at end of file diff --git a/wgpu/LICENSE.MIT b/wgpu/LICENSE.MIT deleted file mode 100644 index 6b8772d1a7..0000000000 --- a/wgpu/LICENSE.MIT +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.MIT \ No newline at end of file diff --git a/wgpu/LICENSE.MIT b/wgpu/LICENSE.MIT new file mode 120000 index 0000000000..6b8772d1a7 --- /dev/null +++ b/wgpu/LICENSE.MIT @@ -0,0 +1 @@ +../LICENSE.MIT \ No newline at end of file From a07a1b37d62e3f99390a9c637bf6b7ae4c6f0d97 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 27 May 2023 10:52:23 +0200 Subject: [PATCH 045/132] Remove ResourceMetadataProvider::Resource and logs --- player/src/lib.rs | 4 +- wgpu-core/src/binding_model.rs | 18 ++- wgpu-core/src/command/bundle.rs | 6 +- wgpu-core/src/command/mod.rs | 6 +- wgpu-core/src/device/global.rs | 54 ++++----- wgpu-core/src/device/life.rs | 26 ++--- wgpu-core/src/device/resource.rs | 8 +- wgpu-core/src/global.rs | 6 +- wgpu-core/src/instance.rs | 14 ++- wgpu-core/src/pipeline.rs | 18 ++- wgpu-core/src/present.rs | 2 +- wgpu-core/src/registry.rs | 12 +- wgpu-core/src/resource.rs | 54 ++++++--- wgpu-core/src/storage.rs | 27 ++--- wgpu-core/src/track/buffer.rs | 58 +++------- wgpu-core/src/track/metadata.rs | 53 ++------- wgpu-core/src/track/stateless.rs | 17 ++- wgpu-core/src/track/texture.rs | 180 ++++++++++------------------- wgpu-hal/src/auxil/dxgi/factory.rs | 6 +- 19 files changed, 253 insertions(+), 316 deletions(-) diff --git a/player/src/lib.rs b/player/src/lib.rs index dfd8dd7fe5..8c522fa6b2 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -156,7 +156,7 @@ impl GlobalPlay for wgc::global::Global { comb_manager: &mut wgc::identity::IdentityManager, ) { use wgc::device::trace::Action; - log::info!("action {:?}", action); + log::debug!("action {:?}", action); //TODO: find a way to force ID perishing without excessive `maintain()` calls. match action { Action::Init { .. 
} => { @@ -254,7 +254,7 @@ impl GlobalPlay for wgc::global::Global { self.bind_group_drop::(id); } Action::CreateShaderModule { id, desc, data } => { - log::info!("Creating shader from {}", data); + log::debug!("Creating shader from {}", data); let code = fs::read_to_string(dir.join(&data)).unwrap(); let source = if data.ends_with(".wgsl") { wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code)) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 934f4c5f9e..033d4d78e0 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -473,10 +473,14 @@ impl Drop for BindGroupLayout { impl Resource for BindGroupLayout { const TYPE: &'static str = "BindGroupLayout"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + fn label(&self) -> String { #[cfg(debug_assertions)] return self.label.clone(); @@ -694,9 +698,13 @@ impl PipelineLayout { impl Resource for PipelineLayout { const TYPE: &'static str = "PipelineLayout"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } #[repr(C)] @@ -881,9 +889,13 @@ impl BindGroup { impl Resource for BindGroup { const TYPE: &'static str = "BindGroup"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } #[derive(Clone, Debug, Error)] diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 6dcfa0d758..c60d1df80c 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -963,9 +963,13 @@ impl RenderBundle { impl Resource for RenderBundle { const TYPE: &'static str = "RenderBundle"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } /// A render bundle's current index buffer state. 
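The binding_model.rs and bundle.rs hunks above (and the command, device, pipeline and resource hunks that follow) all apply the same mechanical change to the `Resource` trait: `info()` becomes `as_info()`, and a new `as_info_mut()` accessor is added so the storage layer can stamp an id into a freshly created resource while it still owns it exclusively, before wrapping it in an `Arc`. The following is a minimal, self-contained sketch of that pattern; the types here are simplified placeholders, not the real wgpu-core definitions.

#[derive(Debug, Default)]
struct ResourceInfo {
    // Plain Option: the id is set before the resource is shared, so no lock is needed.
    id: Option<u64>,
}

impl ResourceInfo {
    fn set_id(&mut self, id: u64) {
        self.id = Some(id);
    }
}

trait Resource {
    const TYPE: &'static str;
    fn as_info(&self) -> &ResourceInfo;
    fn as_info_mut(&mut self) -> &mut ResourceInfo;
}

struct RenderBundle {
    info: ResourceInfo,
}

impl Resource for RenderBundle {
    const TYPE: &'static str = "RenderBundle";
    fn as_info(&self) -> &ResourceInfo {
        &self.info
    }
    fn as_info_mut(&mut self) -> &mut ResourceInfo {
        &mut self.info
    }
}

fn main() {
    // Mirrors what Storage::insert does in this patch: mutate the info while the
    // value is still uniquely owned, then wrap it in an Arc and share it.
    let mut bundle = RenderBundle {
        info: ResourceInfo::default(),
    };
    bundle.as_info_mut().set_id(42);
    let shared = std::sync::Arc::new(bundle);
    assert_eq!(shared.as_info().id, Some(42));
}

Because the id is written before the value is shared, it can live in a plain `Option` instead of a lock-protected slot, which is the simplification the resource.rs hunk further down makes to `ResourceInfo`.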
diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 8247de0ffe..51b2845d2e 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -315,10 +315,14 @@ impl CommandBuffer { impl Resource for CommandBuffer { const TYPE: &'static str = "CommandBuffer"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + fn label(&self) -> String { let str = match self.data.lock().as_ref().unwrap().encoder.label.as_ref() { Some(label) => label.clone(), diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index b9c4ad7322..93d0d8cc74 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -442,7 +442,7 @@ impl Global { //TODO: lock pending writes separately, keep the device read-only - log::debug!("Buffer {:?} is destroyed", buffer_id); + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); let buffer = hub .buffers .get(buffer_id) @@ -492,7 +492,7 @@ impl Global { pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { profiling::scope!("Buffer::drop"); - log::debug!("buffer {:?} is asked to be dropped", buffer_id); + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); let hub = A::hub(self); @@ -577,7 +577,7 @@ impl Global { Err(error) => break error, }; let (id, resource) = fid.assign(texture); - log::info!("Created texture {:?} with {:?}", id, desc); + log::info!("Created Texture {:?} with {:?}", id, desc); if let TextureClearMode::RenderPass { ref mut clear_views, @@ -615,7 +615,7 @@ impl Global { hub.texture_views.prepare(idtv_in.clone().unwrap()) }; let (tv_id, texture_view) = fid_tv.assign(texture_view); - log::info!("Created texture view {:?} for texture {:?}", tv_id, id); + log::info!("Created TextureView {:?} for texture {:?}", tv_id, id); clear_views.push(texture_view.clone()); @@ -699,7 +699,7 @@ impl Global { RwLock::new(TextureInitTracker::new(desc.mip_level_count, 0)); let (id, resource) = fid.assign(texture); - log::info!("Created texture {:?} with {:?}", id, desc); + log::info!("Created Texture {:?} with {:?}", id, desc); device.trackers.lock().textures.insert_single( id.0, @@ -777,7 +777,7 @@ impl Global { pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { profiling::scope!("Texture::drop"); - log::debug!("texture {:?} is asked to be dropped", texture_id); + log::debug!("Texture {:?} is asked to be dropped", texture_id); let hub = A::hub(self); @@ -801,7 +801,7 @@ impl Global { let mut life_lock = device.lock_life(); if device .pending_writes - .write() + .read() .as_ref() .unwrap() .dst_textures @@ -872,7 +872,7 @@ impl Global { wait: bool, ) -> Result<(), resource::TextureViewDestroyError> { profiling::scope!("TextureView::drop"); - log::debug!("texture view {:?} is asked to be dropped", texture_view_id); + log::debug!("TextureView {:?} is asked to be dropped", texture_view_id); let hub = A::hub(self); @@ -953,7 +953,7 @@ impl Global { pub fn sampler_drop(&self, sampler_id: id::SamplerId) { profiling::scope!("Sampler::drop"); - log::debug!("sampler {:?} is asked to be dropped", sampler_id); + log::debug!("Sampler {:?} is asked to be dropped", sampler_id); let hub = A::hub(self); @@ -1050,7 +1050,7 @@ impl Global { pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { profiling::scope!("BindGroupLayout::drop"); log::debug!( - "bind group layout {:?} is asked to be dropped", + "BindGroupLayout {:?} is asked to 
be dropped", bind_group_layout_id ); @@ -1124,7 +1124,7 @@ impl Global { pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { profiling::scope!("PipelineLayout::drop"); log::debug!( - "pipeline layout {:?} is asked to be dropped", + "PipelineLayout {:?} is asked to be dropped", pipeline_layout_id ); @@ -1180,7 +1180,7 @@ impl Global { Err(e) => break e, }; let (id, resource) = fid.assign(bind_group); - log::debug!("Created BindGroup {:?}", id,); + log::info!("Created BindGroup {:?}", id,); device .trackers @@ -1200,7 +1200,7 @@ impl Global { pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { profiling::scope!("BindGroup::drop"); - log::debug!("bind group {:?} is asked to be dropped", bind_group_id); + log::debug!("BindGroup {:?} is asked to be dropped", bind_group_id); let hub = A::hub(self); @@ -1338,10 +1338,7 @@ impl Global { pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { profiling::scope!("ShaderModule::drop"); - log::debug!( - "shader module {:?} is asked to be dropped", - shader_module_id - ); + log::debug!("ShaderModule {:?} is asked to be dropped", shader_module_id); let hub = A::hub(self); hub.shader_modules.unregister(shader_module_id); @@ -1397,7 +1394,7 @@ impl Global { pub fn command_encoder_drop(&self, command_encoder_id: id::CommandEncoderId) { profiling::scope!("CommandEncoder::drop"); log::debug!( - "command encoder {:?} is asked to be dropped", + "CommandEncoder {:?} is asked to be dropped", command_encoder_id ); @@ -1412,7 +1409,7 @@ impl Global { pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { profiling::scope!("CommandBuffer::drop"); log::debug!( - "command buffer {:?} is asked to be dropped", + "CommandBuffer {:?} is asked to be dropped", command_buffer_id ); self.command_encoder_drop::(command_buffer_id) @@ -1486,10 +1483,7 @@ impl Global { pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { profiling::scope!("RenderBundle::drop"); - log::debug!( - "render bundle {:?} is asked to be dropped", - render_bundle_id - ); + log::debug!("RenderBundle {:?} is asked to be dropped", render_bundle_id); let hub = A::hub(self); let bundle = { @@ -1559,7 +1553,7 @@ impl Global { pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { profiling::scope!("QuerySet::drop"); - log::debug!("query set {:?} is asked to be dropped", query_set_id); + log::debug!("QuerySet {:?} is asked to be dropped", query_set_id); let hub = A::hub(self); let query_set_guard = hub.query_sets.read(); @@ -1685,7 +1679,7 @@ impl Global { pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { profiling::scope!("RenderPipeline::drop"); log::debug!( - "render pipeline {:?} is asked to be dropped", + "RenderPipeline {:?} is asked to be dropped", render_pipeline_id ); let hub = A::hub(self); @@ -1808,7 +1802,7 @@ impl Global { pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { profiling::scope!("ComputePipeline::drop"); log::debug!( - "compute pipeline {:?} is asked to be dropped", + "ComputePipeline {:?} is asked to be dropped", compute_pipeline_id ); let hub = A::hub(self); @@ -1894,7 +1888,7 @@ impl Global { unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes); }; - log::info!( + log::warn!( "Automatically choosing presentation mode by rule {:?}. 
Chose {new_mode:?}", config.present_mode ); @@ -1938,7 +1932,7 @@ impl Global { ); }; - log::info!( + log::warn!( "Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}", config.composite_alpha_mode ); @@ -1953,7 +1947,7 @@ impl Global { Ok(()) } - log::info!("configuring surface with {:?}", config); + log::debug!("configuring surface with {:?}", config); let hub = A::hub(self); let surface_guard = self.surfaces.read(); @@ -2227,7 +2221,7 @@ impl Global { pub fn device_drop(&self, device_id: DeviceId) { profiling::scope!("Device::drop"); - log::debug!("device {:?} is asked to be dropped", device_id); + log::debug!("Device {:?} is asked to be dropped", device_id); } /// Exit the unreferenced, inactive device `device_id`. diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index b4798e3bed..d8d9d84d28 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -513,7 +513,7 @@ impl LifetimeTracker { while let Some(bundle) = self.suspected_resources.render_bundles.pop() { let id = bundle.info.id(); if trackers.bundles.remove_abandoned(id) { - log::info!("Bundle {:?} is removed from trackers", id); + log::info!("Bundle {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyRenderBundle(id.0)); @@ -532,7 +532,7 @@ impl LifetimeTracker { while let Some(resource) = self.suspected_resources.bind_groups.pop() { let id = resource.info.id(); if trackers.bind_groups.remove_abandoned(id) { - log::info!("BindGroup {:?} is removed from trackers", id); + log::info!("BindGroup {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroup(id.0)); @@ -565,7 +565,7 @@ impl LifetimeTracker { for texture_view in list.drain(..) { let id = texture_view.info.id(); if trackers.views.remove_abandoned(id) { - log::info!("TextureView {:?} is removed from trackers", id); + log::info!("TextureView {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyTextureView(id.0)); @@ -596,7 +596,7 @@ impl LifetimeTracker { for texture in self.suspected_resources.textures.drain(..) { let id = texture.info.id(); if trackers.textures.remove_abandoned(id) { - log::info!("Texture {:?} is removed from trackers", id); + log::info!("Texture {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyTexture(id.0)); @@ -630,7 +630,7 @@ impl LifetimeTracker { for sampler in self.suspected_resources.samplers.drain(..) { let id = sampler.info.id(); if trackers.samplers.remove_abandoned(id) { - log::info!("Sampler {:?} is removed from trackers", id); + log::info!("Sampler {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroySampler(id.0)); @@ -655,7 +655,7 @@ impl LifetimeTracker { for buffer in self.suspected_resources.buffers.drain(..) { let id = buffer.info.id(); if trackers.buffers.remove_abandoned(id) { - log::info!("Buffer {:?} is removed from trackers", id); + log::info!("Buffer {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBuffer(id.0)); @@ -686,7 +686,7 @@ impl LifetimeTracker { for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) 
{ let id = compute_pipeline.info.id(); if trackers.compute_pipelines.remove_abandoned(id) { - log::info!("ComputePipeline {:?} is removed from trackers", id); + log::info!("ComputePipeline {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyComputePipeline(id.0)); @@ -711,7 +711,7 @@ impl LifetimeTracker { for render_pipeline in self.suspected_resources.render_pipelines.drain(..) { let id = render_pipeline.info.id(); if trackers.render_pipelines.remove_abandoned(id) { - log::info!("RenderPipeline {:?} is removed from trackers", id); + log::info!("RenderPipeline {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyRenderPipeline(id.0)); @@ -737,7 +737,7 @@ impl LifetimeTracker { let id = pipeline_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed if pipeline_layouts_locked.is_unique(id.0).unwrap() { - log::debug!("Pipeline layout {:?} will be destroyed", id); + log::debug!("PipelineLayout {:?} will be removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyPipelineLayout(id.0)); @@ -761,13 +761,13 @@ impl LifetimeTracker { let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); for bgl in self.suspected_resources.bind_group_layouts.drain(..) { - let id = bgl.info().id(); + let id = bgl.as_info().id(); //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. if bind_group_layouts_locked.is_unique(id.0).unwrap() { - log::debug!("Bind group layout {:?} will be destroyed", id); + log::debug!("BindGroupLayout {:?} will be removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroupLayout(id.0)); @@ -788,7 +788,7 @@ impl LifetimeTracker { for query_set in self.suspected_resources.query_sets.drain(..) 
{ let id = query_set.info.id(); if trackers.query_sets.remove_abandoned(id) { - log::info!("QuerySet {:?} is removed from trackers", id); + log::info!("QuerySet {:?} is removed from registry", id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); if let Some(res) = hub.query_sets.unregister(id.0) { @@ -853,7 +853,7 @@ impl LifetimeTracker { let buffer_id = buffer.info.id(); if trackers.buffers.remove_abandoned(buffer_id) { *buffer.map_state.lock() = resource::BufferMapState::Idle; - log::info!("Buffer {:?} is removed from trackers", buffer_id); + log::info!("Buffer {:?} is removed from registry", buffer_id); if let Some(buf) = hub.buffers.unregister(buffer_id.0) { self.free_resources.buffers.push(buf); } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 4f6c07c4c4..f949802e95 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -2823,7 +2823,7 @@ impl Device { )?; } _ => { - log::info!( + log::warn!( "The fragment stage {:?} output @location({}) values are ignored", fragment_stage .as_ref() @@ -3095,7 +3095,11 @@ impl Device { impl Resource for Device { const TYPE: &'static str = "Device"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index bfd77b6d6e..be7ed1bde3 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -36,7 +36,7 @@ impl Global { profiling::scope!("Global::new"); Self { instance: Instance::new(name, instance_desc), - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), } } @@ -52,7 +52,7 @@ impl Global { profiling::scope!("Global::new"); Self { instance: A::create_instance_from_hal(name, hal_instance), - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), } } @@ -71,7 +71,7 @@ impl Global { profiling::scope!("Global::new"); Self { instance, - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), } } diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 20313798a7..5cda62748f 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -154,10 +154,14 @@ pub struct Surface { impl Resource for Surface { const TYPE: &'static str = "Surface"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + fn label(&self) -> String { String::from("") } @@ -330,7 +334,7 @@ impl Adapter { missing_flags, DOWNLEVEL_WARNING_MESSAGE ); - log::info!("{:#?}", caps.downlevel); + log::warn!("{:#?}", caps.downlevel); } // Verify feature preconditions @@ -367,9 +371,13 @@ impl Adapter { impl Resource for Adapter { const TYPE: &'static str = "Adapter"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } #[derive(Clone, Debug, Error)] diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 8a8b4a60bc..d351f8d9ab 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -71,10 +71,14 @@ impl Drop for ShaderModule { impl Resource for ShaderModule { const TYPE: &'static str = 
"ShaderModule"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + fn label(&self) -> String { #[cfg(debug_assertions)] return self.label.clone(); @@ -261,9 +265,13 @@ impl Drop for ComputePipeline { impl Resource for ComputePipeline { const TYPE: &'static str = "ComputePipeline"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } impl ComputePipeline { @@ -488,9 +496,13 @@ impl Drop for RenderPipeline { impl Resource for RenderPipeline { const TYPE: &'static str = "RenderPipeline"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } impl RenderPipeline { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index d5b4929dec..008b23d38b 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -221,7 +221,7 @@ impl Global { }; let (id, resource) = fid.assign(texture); - log::info!("Created CURRENT Texture {:?}", id); + log::info!("Created CURRENT Surface Texture {:?}", id); { // register it in the device tracker as uninitialized diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index a2574f12c4..e2828f2790 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -26,12 +26,8 @@ impl, F: IdentityHandlerFactory> Registry Self { - Self { - identity: factory.spawn(), - storage: RwLock::new(Storage::from_kind(kind)), - backend: Backend::Empty, - } + pub(crate) fn without_backend(factory: &F) -> Self { + Self::new(Backend::Empty, factory) } } @@ -75,10 +71,10 @@ impl, F: IdentityHandlerFactory> Regist } } pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { - self.storage.read().try_get(id).map(|o| o.cloned()) + self.read().try_get(id).map(|o| o.cloned()) } pub(crate) fn get(&self, id: I) -> Result, InvalidId> { - self.storage.read().get(id).map(|v| v.clone()) + self.read().get(id).map(|v| v.clone()) } pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { self.storage.read() diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 0551fc5910..7995b9cc5a 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -49,7 +49,7 @@ use std::{ /// [`Buffer`]: crate::resource::Buffer #[derive(Debug)] pub struct ResourceInfo { - id: RwLock>>, + id: Option>, /// The index of the last queue submission in which the resource /// was used. 
/// @@ -68,7 +68,7 @@ impl ResourceInfo { #[allow(unused_variables)] pub(crate) fn new(label: &str) -> Self { Self { - id: RwLock::new(None), + id: None, submission_index: AtomicUsize::new(0), #[cfg(debug_assertions)] label: label.to_string(), @@ -85,19 +85,18 @@ impl ResourceInfo { { label = self.label.clone(); } - if let Some(id) = self.id.read().as_ref() { + if let Some(id) = self.id.as_ref() { label = format!("{:?}", id); } label } pub(crate) fn id(&self) -> Valid { - self.id.read().unwrap() + self.id.unwrap() } - pub(crate) fn set_id(&self, id: Id) { - let mut value = self.id.write(); - *value = Some(Valid(id)); + pub(crate) fn set_id(&mut self, id: Id) { + self.id = Some(Valid(id)); } /// Record that this resource will be used by the queue submission with the @@ -114,10 +113,11 @@ impl ResourceInfo { pub trait Resource { const TYPE: &'static str; - fn info(&self) -> &ResourceInfo; + fn as_info(&self) -> &ResourceInfo; + fn as_info_mut(&mut self) -> &mut ResourceInfo; fn label(&self) -> String { #[cfg(debug_assertions)] - return self.info().label.clone(); + return self.as_info().label.clone(); #[cfg(not(debug_assertions))] return String::new(); } @@ -406,9 +406,13 @@ pub enum CreateBufferError { impl Resource for Buffer { const TYPE: &'static str = "Buffer"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } /// A temporary buffer, consumed by the command that uses it. @@ -454,10 +458,14 @@ impl Drop for StagingBuffer { impl Resource for StagingBuffer { const TYPE: &'static str = "StagingBuffer"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + fn label(&self) -> String { String::from("") } @@ -720,9 +728,13 @@ pub enum CreateTextureError { impl Resource for Texture { const TYPE: &'static str = "Texture"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } impl Borrow for Texture { @@ -875,9 +887,13 @@ pub enum TextureViewDestroyError {} impl Resource for TextureView { const TYPE: &'static str = "TextureView"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } /// Describes a [`Sampler`] @@ -986,9 +1002,13 @@ pub enum CreateSamplerError { impl Resource for Sampler { const TYPE: &'static str = "Sampler"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } #[derive(Clone, Debug, Error)] @@ -1029,9 +1049,13 @@ impl Drop for QuerySet { impl Resource for QuerySet { const TYPE: &'static str = "QuerySet"; - fn info(&self) -> &ResourceInfo { + fn as_info(&self) -> &ResourceInfo { &self.info } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } } impl QuerySet { diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index 576e843da7..c978120dc9 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -44,27 +44,30 @@ pub(crate) struct InvalidId; /// values, so you should use an id allocator like `IdentityManager` /// that keeps the index values dense and close to zero. 
#[derive(Debug)] -pub struct Storage +pub struct Storage where T: Resource, + I: id::TypedId, { pub(crate) map: Vec>, kind: &'static str, _phantom: PhantomData, } -impl ops::Index> for Storage +impl ops::Index> for Storage where T: Resource, + I: id::TypedId, { type Output = Arc; fn index(&self, id: id::Valid) -> &Arc { self.get(id.0).unwrap() } } -impl Storage +impl Storage where T: Resource, + I: id::TypedId, { pub(crate) fn new() -> Self { Self { @@ -75,17 +78,11 @@ where } } -impl Storage +impl Storage where T: Resource, + I: id::TypedId, { - pub(crate) fn from_kind(kind: &'static str) -> Self { - Self { - map: Vec::new(), - kind, - _phantom: PhantomData, - } - } pub(crate) fn contains(&self, id: I) -> bool { let (index, epoch, _) = id.unzip(); match self.map.get(index as usize) { @@ -182,9 +179,9 @@ where } } - pub(crate) fn insert(&mut self, id: I, value: T) { + pub(crate) fn insert(&mut self, id: I, mut value: T) { let (index, epoch, _) = id.unzip(); - value.info().set_id(id); + value.as_info_mut().set_id(id); self.insert_impl(index as usize, Element::Occupied(Arc::new(value), epoch)) } @@ -193,9 +190,9 @@ where self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) } - pub(crate) fn force_replace(&mut self, id: I, value: T) { + pub(crate) fn force_replace(&mut self, id: I, mut value: T) { let (index, epoch, _) = id.unzip(); - value.info().set_id(id); + value.as_info_mut().set_id(id); self.map[index as usize] = Element::Occupied(Arc::new(value), epoch); } diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index ff55ccb02e..6cd2518d7f 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -150,19 +150,17 @@ impl BufferUsageScope { bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { for &(id, ref resource, state) in &bind_group.buffers { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; unsafe { insert_or_merge( None, &mut self.state, &mut self.metadata, - index32, + index as _, index, BufferStateProvider::Direct { state }, ResourceMetadataProvider::Direct { - epoch, resource: Cow::Borrowed(resource), }, )? 
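The track/buffer.rs hunks around this point drop the per-entry epoch bookkeeping: only the array index is extracted from the id up front (`id.unzip().0 as usize`), and `ResourceMetadataProvider::Direct` loses its `epoch` field, because the epoch can be recovered on demand from the id stored in the tracked resource's `ResourceInfo` (see the `get_epoch` change in track/metadata.rs further down). The sketch below illustrates that idea with a simplified, hypothetical id layout rather than wgpu-core's real packed ids.

// Illustrative id: index + epoch + backend tag, split apart by unzip().
#[derive(Clone, Copy, Debug, PartialEq)]
struct Id {
    index: u32,
    epoch: u32,
    backend: u8,
}

impl Id {
    fn zip(index: u32, epoch: u32, backend: u8) -> Self {
        Self { index, epoch, backend }
    }
    fn unzip(self) -> (u32, u32, u8) {
        (self.index, self.epoch, self.backend)
    }
}

// Stand-in for a tracked resource that keeps its own id in its info block.
struct TrackedResource {
    id: Id,
}

fn main() {
    let id = Id::zip(7, 3, 1);

    // Before this patch: index and epoch were both pulled out immediately, and the
    // epoch was mirrored into a separate `epochs` vector inside the tracker.
    let (index32, epoch, _) = id.unzip();
    assert_eq!((index32, epoch), (7, 3));

    // After this patch: only the index is needed to address the tracker slots.
    let index = id.unzip().0 as usize;
    assert_eq!(index, 7);

    // The epoch is recovered lazily from the resource itself, the way the new
    // ResourceMetadataProvider::get_epoch does via `as_info().id()`.
    let resource = TrackedResource { id };
    let recovered_epoch = resource.id.unzip().1;
    assert_eq!(recovered_epoch, 3);
}

This also matches the `remove_abandoned` changes in the tracker hunks, which now decide removal on the reference count alone instead of comparing a separately stored epoch.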
@@ -226,8 +224,7 @@ impl BufferUsageScope { .get(id) .map_err(|_| UsageConflict::BufferInvalid { id })?; - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -238,12 +235,11 @@ impl BufferUsageScope { None, &mut self.state, &mut self.metadata, - index32, + index as _, index, BufferStateProvider::Direct { state: new_state }, - ResourceMetadataProvider::Resource { - epoch, - resource: buffer.clone(), + ResourceMetadataProvider::Direct { + resource: Cow::Owned(buffer.clone()), }, )?; } @@ -322,8 +318,7 @@ impl BufferTracker { resource: Arc>, state: BufferUses, ) { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; self.allow_index(index); @@ -344,7 +339,6 @@ impl BufferTracker { BufferStateProvider::Direct { state }, None, ResourceMetadataProvider::Direct { - epoch, resource: Cow::Owned(resource), }, ) @@ -366,8 +360,7 @@ impl BufferTracker { ) -> SetSingleResult { let buffer = storage.get(id).ok()?; - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -378,13 +371,11 @@ impl BufferTracker { Some(&mut self.start), &mut self.end, &mut self.metadata, - index32, index, BufferStateProvider::Direct { state }, None, - ResourceMetadataProvider::Resource { - epoch, - resource: buffer.clone(), + ResourceMetadataProvider::Direct { + resource: Cow::Owned(buffer.clone()), }, &mut self.temp, ) @@ -417,7 +408,6 @@ impl BufferTracker { Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &tracker.start, @@ -456,7 +446,6 @@ impl BufferTracker { Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &scope.state, @@ -513,7 +502,6 @@ impl BufferTracker { Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &scope.state, @@ -538,8 +526,7 @@ impl BufferTracker { /// If the ID is higher than the length of internal vectors, /// false will be returned. 
pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -549,10 +536,9 @@ impl BufferTracker { unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); //3 ref count: Registry, Device Tracker and suspected resource itself - if existing_epoch == epoch && existing_ref_count <= 3 { + if existing_ref_count <= 3 { self.metadata.remove(index); return true; } @@ -658,7 +644,6 @@ unsafe fn insert_or_barrier_update( start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], resource_metadata: &mut ResourceMetadata>, - index32: u32, index: usize, start_state_provider: BufferStateProvider<'_>, end_state_provider: Option>, @@ -683,15 +668,7 @@ unsafe fn insert_or_barrier_update( } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - unsafe { - barrier( - current_states, - index32, - index, - start_state_provider, - barriers, - ) - }; + unsafe { barrier(current_states, index, start_state_provider, barriers) }; unsafe { update(current_states, index, update_state_provider) }; } @@ -723,8 +700,8 @@ unsafe fn insert( } *current_states.get_unchecked_mut(index) = new_end_state; - let (epoch, resource) = metadata_provider.get_own(index); - resource_metadata.insert(index, epoch, resource); + let resource = metadata_provider.get_own(index); + resource_metadata.insert(index, resource); } } @@ -763,7 +740,6 @@ unsafe fn merge( #[inline(always)] unsafe fn barrier( current_states: &mut [BufferUses], - index32: u32, index: usize, state_provider: BufferStateProvider<'_>, barriers: &mut Vec>, @@ -776,12 +752,12 @@ unsafe fn barrier( } barriers.push(PendingTransition { - id: index32, + id: index as _, selector: (), usage: current_state..new_state, }); - log::trace!("\tbuf {index32}: transition {current_state:?} -> {new_state:?}"); + log::trace!("\tbuf {index}: transition {current_state:?} -> {new_state:?}"); } #[inline(always)] diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 535de4e2b4..0c64abe687 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -5,7 +5,7 @@ use bit_vec::BitVec; use std::{borrow::Cow, marker::PhantomData, mem, sync::Arc}; use wgt::strict_assert; -/// A set of resources, holding a [`RefCount`] and epoch for each member. +/// A set of resources, holding a `Arc` and epoch for each member. /// /// Testing for membership is fast, and iterating over members is /// reasonably fast in practice. Storage consumption is proportional @@ -17,12 +17,9 @@ pub(super) struct ResourceMetadata> { /// If the resource with index `i` is a member, `owned[i]` is `true`. owned: BitVec, - /// A vector parallel to `owned`, holding clones of members' `RefCount`s. + /// A vector holding clones of members' `T`s. resources: Vec>>, - /// A vector parallel to `owned`, holding the epoch of each members' id. - epochs: Vec, - /// This tells Rust that this type should be covariant with `A`. 
_phantom: PhantomData<(A, I)>, } @@ -32,8 +29,6 @@ impl> ResourceMetadata { Self { owned: BitVec::default(), resources: Vec::new(), - epochs: Vec::new(), - _phantom: PhantomData, } } @@ -45,8 +40,6 @@ impl> ResourceMetadata { pub(super) fn set_size(&mut self, size: usize) { self.resources.resize(size, None); - self.epochs.resize(size, u32::MAX); - resize_bitvec(&mut self.owned, size); } @@ -58,8 +51,6 @@ impl> ResourceMetadata { pub(super) fn tracker_assert_in_bounds(&self, index: usize) { strict_assert!(index < self.owned.len()); strict_assert!(index < self.resources.len()); - strict_assert!(index < self.epochs.len()); - strict_assert!(if self.contains(index) { self.resources[index].is_some() } else { @@ -100,10 +91,9 @@ impl> ResourceMetadata { /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, resource: Arc) { + pub(super) unsafe fn insert(&mut self, index: usize, resource: Arc) { self.owned.set(index, true); unsafe { - *self.epochs.get_unchecked_mut(index) = epoch; *self.resources.get_unchecked_mut(index) = Some(resource); } } @@ -142,17 +132,6 @@ impl> ResourceMetadata { } } - /// Get the [`Epoch`] of the id of the resource with the given index. - /// - /// # Safety - /// - /// The given `index` must be in bounds for this `ResourceMetadata`'s - /// existing tables. See `tracker_assert_in_bounds`. - #[inline(always)] - pub(super) unsafe fn get_epoch_unchecked(&self, index: usize) -> Epoch { - unsafe { *self.epochs.get_unchecked(index) } - } - /// Returns an iterator over the resources owned by `self`. pub(super) fn owned_resources(&self) -> impl Iterator> + '_ { if !self.owned.is_empty() { @@ -176,7 +155,6 @@ impl> ResourceMetadata { pub(super) unsafe fn remove(&mut self, index: usize) { unsafe { *self.resources.get_unchecked_mut(index) = None; - *self.epochs.get_unchecked_mut(index) = u32::MAX; } self.owned.set(index, false); } @@ -188,16 +166,11 @@ impl> ResourceMetadata { /// trackers can get new resource metadata from. pub(super) enum ResourceMetadataProvider<'a, A: HalApi, I: TypedId, T: Resource> { /// Comes directly from explicit values. - Direct { - epoch: Epoch, - resource: Cow<'a, Arc>, - }, + Direct { resource: Cow<'a, Arc> }, /// Comes from another metadata tracker. Indirect { metadata: &'a ResourceMetadata, }, - /// The epoch is given directly, but the life count comes from the resource itself. - Resource { epoch: Epoch, resource: Arc }, } impl> ResourceMetadataProvider<'_, A, I, T> { /// Get the epoch and an owned refcount from this. @@ -207,17 +180,16 @@ impl> ResourceMetadataProvider<'_, A, I, T /// - The index must be in bounds of the metadata tracker if this uses an indirect source. /// - info must be Some if this uses a Resource source. 
#[inline(always)] - pub(super) unsafe fn get_own(self, index: usize) -> (Epoch, Arc) { + pub(super) unsafe fn get_own(self, index: usize) -> Arc { match self { - ResourceMetadataProvider::Direct { epoch, resource } => (epoch, resource.into_owned()), + ResourceMetadataProvider::Direct { resource } => resource.into_owned(), ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); - (unsafe { *metadata.epochs.get_unchecked(index) }, { + { let resource = unsafe { metadata.resources.get_unchecked(index) }; unsafe { resource.clone().unwrap_unchecked() } - }) + } } - ResourceMetadataProvider::Resource { epoch, resource } => (epoch, resource), } } /// Get the epoch from this. @@ -227,14 +199,7 @@ impl> ResourceMetadataProvider<'_, A, I, T /// - The index must be in bounds of the metadata tracker if this uses an indirect source. #[inline(always)] pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch { - match self { - ResourceMetadataProvider::Direct { epoch, .. } - | ResourceMetadataProvider::Resource { epoch, .. } => epoch, - ResourceMetadataProvider::Indirect { metadata } => { - metadata.tracker_assert_in_bounds(index); - unsafe { *metadata.epochs.get_unchecked(index) } - } - } + unsafe { self.get_own(index).as_info().id().0.unzip().1 } } } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index f2375f5e4b..b003ea9e17 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -106,7 +106,7 @@ impl> StatelessTracker { /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. pub fn insert_single(&mut self, id: Valid, resource: Arc) { - let (index32, epoch, _) = id.0.unzip(); + let (index32, _epoch, _) = id.0.unzip(); let index = index32 as usize; self.allow_index(index); @@ -114,7 +114,7 @@ impl> StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata.insert(index, epoch, resource); + self.metadata.insert(index, resource); } } @@ -125,7 +125,7 @@ impl> StatelessTracker { pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { let resource = storage.get(id).ok()?; - let (index32, epoch, _) = id.unzip(); + let (index32, _epoch, _) = id.unzip(); let index = index32 as usize; self.allow_index(index); @@ -133,7 +133,7 @@ impl> StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata.insert(index, epoch, resource.clone()); + self.metadata.insert(index, resource.clone()); } Some(resource) @@ -156,9 +156,8 @@ impl> StatelessTracker { let previously_owned = self.metadata.contains_unchecked(index); if !previously_owned { - let epoch = other.metadata.get_epoch_unchecked(index); let other_resource = other.metadata.get_resource_unchecked(index); - self.metadata.insert(index, epoch, other_resource.clone()); + self.metadata.insert(index, other_resource.clone()); } } } @@ -172,8 +171,7 @@ impl> StatelessTracker { /// If the ID is higher than the length of internal vectors, /// false will be returned. 
pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -183,10 +181,9 @@ impl> StatelessTracker { unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); //3 ref count: Registry, Device Tracker and suspected resource itself - if existing_epoch == epoch && existing_ref_count <= 3 { + if existing_ref_count <= 3 { self.metadata.remove(index); return true; } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 0c9868c9df..a2a2111ec0 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -206,7 +206,7 @@ impl TextureBindGroupState { #[derive(Debug)] pub(crate) struct TextureStateSet { simple: Vec, - complex: FastHashMap, + complex: FastHashMap, } impl TextureStateSet { fn new() -> Self { @@ -246,7 +246,7 @@ impl TextureUsageScope { strict_assert!(if self.metadata.contains(index) && self.set.simple[index] == TextureUses::COMPLEX { - self.set.complex.contains_key(&(index as u32)) + self.set.complex.contains_key(&index) } else { true }); @@ -291,18 +291,15 @@ impl TextureUsageScope { } for index in scope.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); - let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; unsafe { insert_or_merge( texture_selector, &mut self.set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, ResourceMetadataProvider::Indirect { @@ -359,23 +356,20 @@ impl TextureUsageScope { selector: Option, new_state: TextureUses, ) -> Result<(), UsageConflict> { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; let resource = storage.get(id.0).unwrap(); self.tracker_assert_in_bounds(index); - let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; unsafe { insert_or_merge( texture_selector, &mut self.set, &mut self.metadata, - index32, index, TextureStateProvider::from_option(selector, new_state), ResourceMetadataProvider::Direct { - epoch, resource: Cow::Borrowed(resource), }, )? @@ -419,14 +413,14 @@ impl TextureTracker { strict_assert!(if self.metadata.contains(index) && self.start_set.simple[index] == TextureUses::COMPLEX { - self.start_set.complex.contains_key(&(index as u32)) + self.start_set.complex.contains_key(&index) } else { true }); strict_assert!(if self.metadata.contains(index) && self.end_set.simple[index] == TextureUses::COMPLEX { - self.end_set.complex.contains_key(&(index as u32)) + self.end_set.complex.contains_key(&index) } else { true }); @@ -467,8 +461,7 @@ impl TextureTracker { /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
pub fn insert_single(&mut self, id: TextureId, resource: Arc>, usage: TextureUses) { - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -486,12 +479,10 @@ impl TextureTracker { Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::KnownSingle { state: usage }, None, ResourceMetadataProvider::Direct { - epoch, resource: Cow::Owned(resource), }, ) @@ -512,8 +503,7 @@ impl TextureTracker { selector: TextureSelector, new_state: TextureUses, ) -> Option>> { - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -525,16 +515,14 @@ impl TextureTracker { Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::Selector { selector, state: new_state, }, None, - ResourceMetadataProvider::Resource { - epoch, - resource: texture.clone(), + ResourceMetadataProvider::Direct { + resource: Cow::Owned(texture.clone()), }, &mut self.temp, ) @@ -558,18 +546,15 @@ impl TextureTracker { } for index in tracker.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); tracker.tracker_assert_in_bounds(index); unsafe { - let texture_selector = texture_selector_from_texture(storage, index32); + let texture_selector = texture_selector_from_texture(storage, index); insert_or_barrier_update( texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &tracker.start_set, @@ -605,18 +590,15 @@ impl TextureTracker { } for index in scope.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); unsafe { - let texture_selector = texture_selector_from_texture(storage, index32); + let texture_selector = texture_selector_from_texture(storage, index); insert_or_barrier_update( texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, None, @@ -659,21 +641,19 @@ impl TextureTracker { } for t in bind_group_state.textures.iter() { - let (index32, _, _) = t.id.0.unzip(); - let index = index32 as usize; + let index = t.id.0.unzip().0 as usize; scope.tracker_assert_in_bounds(index); if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - let texture_selector = unsafe { texture_selector_from_texture(storage, index32) }; + let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; unsafe { insert_or_barrier_update( texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, None, @@ -695,8 +675,7 @@ impl TextureTracker { /// If the ID is higher than the length of internal vectors, /// false will be returned. 
pub fn remove(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -706,14 +685,9 @@ impl TextureTracker { unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); - assert_eq!(existing_epoch, epoch); - - self.start_set.complex.remove(&index32); - self.end_set.complex.remove(&index32); - + self.start_set.complex.remove(&index); + self.end_set.complex.remove(&index); self.metadata.remove(index); - return true; } } @@ -729,8 +703,7 @@ impl TextureTracker { /// If the ID is higher than the length of internal vectors, /// false will be returned. pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = id.0.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -740,15 +713,12 @@ impl TextureTracker { unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); let existing_ref_count = self.metadata.get_ref_count_unchecked(index); //3 ref count: Registry, Device Tracker and suspected resource itself - if existing_epoch == epoch && existing_ref_count <= 3 { - self.start_set.complex.remove(&index32); - self.end_set.complex.remove(&index32); - + if existing_ref_count <= 3 { + self.start_set.complex.remove(&index); + self.end_set.complex.remove(&index); self.metadata.remove(index); - return true; } } @@ -824,7 +794,6 @@ impl<'a> TextureStateProvider<'a> { unsafe fn get_state( self, texture_selector: Option<&TextureSelector>, - index32: u32, index: usize, ) -> SingleOrManyStates< TextureUses, @@ -847,7 +816,7 @@ impl<'a> TextureStateProvider<'a> { let new_state = *unsafe { set.simple.get_unchecked(index) }; if new_state == TextureUses::COMPLEX { - let new_complex = unsafe { set.complex.get(&index32).unwrap_unchecked() }; + let new_complex = unsafe { set.complex.get(&index).unwrap_unchecked() }; SingleOrManyStates::Many(EitherIter::Right( new_complex.to_selector_state_iter(), @@ -865,9 +834,9 @@ impl<'a> TextureStateProvider<'a> { #[inline(always)] unsafe fn texture_selector_from_texture( storage: &Storage, TextureId>, - index32: u32, + index: usize, ) -> &TextureSelector { - let texture = unsafe { storage.get_unchecked(index32) }; + let texture = unsafe { storage.get_unchecked(index as _) }; &texture.full_range } @@ -885,7 +854,6 @@ unsafe fn insert_or_merge( texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, resource_metadata: &mut ResourceMetadata>, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, @@ -899,7 +867,6 @@ unsafe fn insert_or_merge( None, current_state_set, resource_metadata, - index32, index, state_provider, None, @@ -913,7 +880,6 @@ unsafe fn insert_or_merge( merge( texture_selector, current_state_set, - index32, index, state_provider, metadata_provider, @@ -944,7 +910,6 @@ unsafe fn insert_or_barrier_update( start_state: Option<&mut TextureStateSet>, current_state_set: &mut TextureStateSet, resource_metadata: &mut ResourceMetadata>, - index32: u32, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, @@ -960,7 +925,6 @@ unsafe fn insert_or_barrier_update( start_state, current_state_set, resource_metadata, - index32, index, start_state_provider, end_state_provider, @@ 
-975,7 +939,6 @@ unsafe fn insert_or_barrier_update( barrier( texture_selector, current_state_set, - index32, index, start_state_provider, barriers, @@ -988,7 +951,6 @@ unsafe fn insert_or_barrier_update( texture_selector, start_state_set, current_state_set, - index32, index, update_state_provider, ) @@ -1001,20 +963,19 @@ unsafe fn insert( start_state: Option<&mut TextureStateSet>, end_state: &mut TextureStateSet, resource_metadata: &mut ResourceMetadata>, - index32: u32, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) { - let start_layers = unsafe { start_state_provider.get_state(texture_selector, index32, index) }; + let start_layers = unsafe { start_state_provider.get_state(texture_selector, index) }; match start_layers { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. strict_assert_eq!(invalid_resource_state(state), false); - log::trace!("\ttex {index32}: insert start {state:?}"); + log::trace!("\ttex {index}: insert start {state:?}"); if let Some(start_state) = start_state { unsafe { *start_state.simple.get_unchecked_mut(index) = state }; @@ -1031,29 +992,29 @@ unsafe fn insert( let complex = unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; - log::trace!("\ttex {index32}: insert start {complex:?}"); + log::trace!("\ttex {index}: insert start {complex:?}"); if let Some(start_state) = start_state { unsafe { *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - start_state.complex.insert(index32, complex.clone()); + start_state.complex.insert(index, complex.clone()); } // We only need to insert ourselves the end state if there is no end state provider. if end_state_provider.is_none() { unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - end_state.complex.insert(index32, complex); + end_state.complex.insert(index, complex); } } } if let Some(end_state_provider) = end_state_provider { - match unsafe { end_state_provider.get_state(texture_selector, index32, index) } { + match unsafe { end_state_provider.get_state(texture_selector, index) } { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. strict_assert_eq!(invalid_resource_state(state), false); - log::trace!("\ttex {index32}: insert end {state:?}"); + log::trace!("\ttex {index}: insert end {state:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. @@ -1066,19 +1027,19 @@ unsafe fn insert( ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; - log::trace!("\ttex {index32}: insert end {complex:?}"); + log::trace!("\ttex {index}: insert end {complex:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. 
unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - end_state.complex.insert(index32, complex); + end_state.complex.insert(index, complex); } } } unsafe { - let (epoch, resource) = metadata_provider.get_own(index); - resource_metadata.insert(index, epoch, resource); + let resource = metadata_provider.get_own(index); + resource_metadata.insert(index, resource); } } @@ -1086,7 +1047,6 @@ unsafe fn insert( unsafe fn merge( texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, @@ -1094,27 +1054,24 @@ unsafe fn merge( let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set - .complex - .get_mut(&index32) - .unwrap_unchecked() + current_state_set.complex.get_mut(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { let merged_state = *current_simple | new_simple; - log::trace!("\ttex {index32}: merge simple {current_simple:?} + {new_simple:?}"); + log::trace!("\ttex {index}: merge simple {current_simple:?} + {new_simple:?}"); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1140,14 +1097,12 @@ unsafe fn merge( for (selector, new_state) in new_many { let merged_state = *current_simple | new_state; - log::trace!( - "\ttex {index32}: merge {selector:?} {current_simple:?} + {new_state:?}" - ); + log::trace!("\ttex {index}: merge {selector:?} {current_simple:?} + {new_state:?}"); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1171,7 +1126,7 @@ unsafe fn merge( } *current_simple = TextureUses::COMPLEX; - current_state_set.complex.insert(index32, new_complex); + current_state_set.complex.insert(index, new_complex); } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_simple)) => { for (mip_id, mip) in current_complex.mips.iter_mut().enumerate() { @@ -1185,14 +1140,14 @@ unsafe fn merge( let merged_state = merged_state - TextureUses::UNKNOWN; log::trace!( - "\ttex {index32}: merge mip {mip_id} layers {layers:?} \ + "\ttex {index}: merge mip {mip_id} layers {layers:?} \ {current_layer_state:?} + {new_simple:?}" ); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1230,14 +1185,14 @@ unsafe fn merge( } log::trace!( - "\ttex {index32}: merge mip {mip_id} layers {layers:?} \ + "\ttex {index}: merge mip {mip_id} layers {layers:?} \ {current_layer_state:?} + {new_state:?}" ); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1264,7 +1219,6 @@ unsafe fn merge( unsafe fn 
barrier( texture_selector: &TextureSelector, current_state_set: &TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, barriers: &mut Vec>, @@ -1272,13 +1226,13 @@ unsafe fn barrier( let current_simple = unsafe { *current_state_set.simple.get_unchecked(index) }; let current_state = if current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set.complex.get(&index32).unwrap_unchecked() + current_state_set.complex.get(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1286,10 +1240,10 @@ unsafe fn barrier( return; } - log::trace!("\ttex {index32}: transition simple {current_simple:?} -> {new_simple:?}"); + log::trace!("\ttex {index}: transition simple {current_simple:?} -> {new_simple:?}"); barriers.push(PendingTransition { - id: index32, + id: index as _, selector: texture_selector.clone(), usage: current_simple..new_simple, }); @@ -1305,11 +1259,11 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition {selector:?} {current_simple:?} -> {new_state:?}" + "\ttex {index}: transition {selector:?} {current_simple:?} -> {new_state:?}" ); barriers.push(PendingTransition { - id: index32, + id: index as _, selector, usage: current_simple..new_state, }); @@ -1329,12 +1283,12 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition mip {mip_id} layers {layers:?} \ + "\ttex {index}: transition mip {mip_id} layers {layers:?} \ {current_layer_state:?} -> {new_simple:?}" ); barriers.push(PendingTransition { - id: index32, + id: index as _, selector: TextureSelector { mips: mip_id..mip_id + 1, layers: layers.clone(), @@ -1363,12 +1317,12 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition mip {mip_id} layers {layers:?} \ + "\ttex {index}: transition mip {mip_id} layers {layers:?} \ {current_layer_state:?} -> {new_state:?}" ); barriers.push(PendingTransition { - id: index32, + id: index as _, selector: TextureSelector { mips: mip_id..mip_id + 1, layers, @@ -1388,7 +1342,6 @@ unsafe fn update( texture_selector: &TextureSelector, start_state_set: &mut TextureStateSet, current_state_set: &mut TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, ) { @@ -1399,23 +1352,19 @@ unsafe fn update( // If the state is simple, the first insert to the tracker would cover it. 
let mut start_complex = None; if start_simple == TextureUses::COMPLEX { - start_complex = - Some(unsafe { start_state_set.complex.get_mut(&index32).unwrap_unchecked() }); + start_complex = Some(unsafe { start_state_set.complex.get_mut(&index).unwrap_unchecked() }); } let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set - .complex - .get_mut(&index32) - .unwrap_unchecked() + current_state_set.complex.get_mut(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_selector), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1450,7 +1399,7 @@ unsafe fn update( } *current_simple = TextureUses::COMPLEX; - current_state_set.complex.insert(index32, new_complex); + current_state_set.complex.insert(index, new_complex); } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_single)) => { for (mip_id, mip) in current_complex.mips.iter().enumerate() { @@ -1476,12 +1425,7 @@ unsafe fn update( } unsafe { *current_state_set.simple.get_unchecked_mut(index) = new_single }; - unsafe { - current_state_set - .complex - .remove(&index32) - .unwrap_unchecked() - }; + unsafe { current_state_set.complex.remove(&index).unwrap_unchecked() }; } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => { for (selector, new_state) in new_many { diff --git a/wgpu-hal/src/auxil/dxgi/factory.rs b/wgpu-hal/src/auxil/dxgi/factory.rs index af7e79b6c4..8b5f4a4561 100644 --- a/wgpu-hal/src/auxil/dxgi/factory.rs +++ b/wgpu-hal/src/auxil/dxgi/factory.rs @@ -139,7 +139,7 @@ pub fn create_factory( log::error!("IDXGIFactory1 creation function not found: {:?}", err); return Err(crate::InstanceError); } - // If we don't print it to info as all win7 will hit this case. + // If we don't print it to warn as all win7 will hit this case. Err(err) => { log::warn!("IDXGIFactory1 creation function not found: {:?}", err); None @@ -161,7 +161,7 @@ pub fn create_factory( log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); return Err(crate::InstanceError); } - // If we don't print it to info. + // If we don't print it to warn. Err(err) => { log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); return Ok((lib_dxgi, d3d12::DxgiFactory::Factory4(factory4))); @@ -199,7 +199,7 @@ pub fn create_factory( log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); return Err(crate::InstanceError); } - // If we don't print it to info. + // If we don't print it to warn. 
Err(err) => { log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); } From 9af2d0c4d2768827db0fae26f85001e1469e55da Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 27 May 2023 11:06:32 +0200 Subject: [PATCH 046/132] Adding some doc info --- wgpu-core/src/device/resource.rs | 14 +++++++++++++- wgpu-core/src/registry.rs | 11 +++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index f949802e95..ce5e59c269 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -62,6 +62,18 @@ use super::{ /// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system /// 1. `self.trackers` is locked last (unenforced) /// 1. `self.trace` is locked last (unenforced) +/// +/// For now, avoid locking the same resource or registry twice within a single call, +/// and keep every lock to the smallest scope possible. +/// Unless otherwise specified, no lock may be acquired while holding another lock. +/// This means that you must inspect function calls made while a lock is held +/// to see what locks the callee may try to acquire. +/// +/// As of this writing, one such call chain is: +/// device_maintain_ids locks Device::lifetime_tracker, and calls... +/// triage_suspected locks Device::trackers, and calls... +/// Registry::unregister locks Registry::storage +/// pub struct Device { raw: Option, pub(crate) adapter_id: id::Valid, @@ -255,7 +267,7 @@ impl Device { /// submissions still in flight. (We have to take the locks needed to /// produce this information for other reasons, so we might as well just /// return it to our callers.) - pub(crate) fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( + pub(crate) fn maintain<'this, G: GlobalIdentityHandlerFactory>( &'this self, hub: &Hub, fence: &A::Fence, diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index e2828f2790..869975f9cc 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -10,6 +10,17 @@ use crate::{ storage::{InvalidId, Storage, StorageReport}, }; +/// The Registry is the primary holder of each resource type. +/// Every resource is now arcanized, so dropping the last Arc +/// frees the memory and releases the inner raw resource. +/// +/// The Registry acts as the main entry point that keeps a resource alive +/// between its creation and its release from user-land code. +/// +/// A resource may still be alive after user-land code has released it +/// if it is used by an active submission or is otherwise kept alive +/// by any other dependent resource. +/// #[derive(Debug)] pub struct Registry, F: IdentityHandlerFactory> { identity: F::Filter, From c498c28fae09f04680740b58ba3c0e3ab04dae5d Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Mon, 5 Jun 2023 19:59:57 +0200 Subject: [PATCH 047/132] Add missing dependency device\adapter --- wgpu-core/src/device/global.rs | 19 +++++++------------ wgpu-core/src/device/resource.rs | 10 +++++----- wgpu-core/src/instance.rs | 25 +++++++++++-------------- 3 files changed, 23 insertions(+), 31 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 93d0d8cc74..2d215fdfd2 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -571,8 +571,7 @@ impl Global { )); } - let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); - let texture = match device.create_texture(device_id, &adapter, desc) { + 
let texture = match device.create_texture(device_id, &device.adapter, desc) { Ok(texture) => texture, Err(error) => break error, }; @@ -673,10 +672,8 @@ impl Global { trace.add(trace::Action::CreateTexture(fid.id(), None, desc.clone())); } - let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); - let format_features = match device - .describe_format_features(&adapter, desc.format) + .describe_format_features(&device.adapter, desc.format) .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error)) { Ok(features) => features, @@ -1603,7 +1600,6 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { trace.add(trace::Action::CreateRenderPipeline { @@ -1614,7 +1610,7 @@ impl Global { } let pipeline = - match device.create_render_pipeline(&adapter, desc, implicit_context, hub) { + match device.create_render_pipeline(&device.adapter, desc, implicit_context, hub) { Ok(pair) => pair, Err(e) => break e, }; @@ -1951,7 +1947,6 @@ impl Global { let hub = A::hub(self); let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); let device_guard = hub.devices.read(); let error = 'outer: loop { @@ -1971,7 +1966,7 @@ impl Global { let caps = unsafe { let suf = A::get_surface(surface); - let adapter = &adapter_guard[device.adapter_id]; + let adapter = &device.adapter; match adapter .raw .adapter @@ -2238,11 +2233,11 @@ impl Global { debug_assert!(device.lock_life().queue_empty()); device.pending_writes.write().as_mut().unwrap().deactivate(); - let adapter = hub.adapters.get(device.adapter_id.0).unwrap(); // Adapter is only referenced by the device and itself. // This isn't a robust way to destroy them, we should find a better one. - if adapter.is_unique() { - free_adapter_id = Some(device.adapter_id.0); + // Check the refcount here should 2 -> registry and device + if device.adapter.ref_count() == 2 { + free_adapter_id = Some(device.adapter.info.id().0); } drop(device); diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ce5e59c269..cb9e0e0bec 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -10,7 +10,7 @@ use crate::{ }, hal_api::HalApi, hub::Hub, - id::{self, AdapterId, DeviceId}, + id::{self, DeviceId}, identity::GlobalIdentityHandlerFactory, init_tracker::{ BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, @@ -76,7 +76,7 @@ use super::{ /// pub struct Device { raw: Option, - pub(crate) adapter_id: id::Valid, + pub(crate) adapter: Arc>, pub(crate) queue: Option, pub(crate) zero_buffer: Option, //Note: The submission index here corresponds to the last submission that is done. 
@@ -107,7 +107,7 @@ pub struct Device { impl std::fmt::Debug for Device { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Device") - .field("adapter_id", &self.adapter_id) + .field("adapter", &self.adapter.info.label()) .field("limits", &self.limits) .field("features", &self.features) .field("downlevel", &self.downlevel) @@ -165,7 +165,7 @@ impl Device { impl Device { pub(crate) fn new( open: hal::OpenDevice, - adapter_id: id::Valid, + adapter: &Arc>, alignments: hal::Alignments, downlevel: wgt::DownlevelCapabilities, desc: &DeviceDescriptor, @@ -218,7 +218,7 @@ impl Device { Ok(Self { raw: Some(open.device), - adapter_id, + adapter: adapter.clone(), queue: Some(open.queue), zero_buffer: Some(zero_buffer), info: ResourceInfo::new(""), diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 5cda62748f..5ce5a59b9d 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -4,7 +4,7 @@ use crate::{ device::{resource::Device, DeviceDescriptor}, global::Global, hal_api::HalApi, - id::{AdapterId, DeviceId, SurfaceId, Valid}, + id::{AdapterId, DeviceId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, resource::{Resource, ResourceInfo}, @@ -188,7 +188,7 @@ impl Surface { pub struct Adapter { pub(crate) raw: hal::ExposedAdapter, - info: ResourceInfo, + pub(crate) info: ResourceInfo, } impl Adapter { @@ -293,8 +293,7 @@ impl Adapter { } fn create_device_from_hal( - &self, - self_id: AdapterId, + self: &Arc, open: hal::OpenDevice, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, @@ -302,7 +301,7 @@ impl Adapter { let caps = &self.raw.capabilities; Device::new( open, - Valid(self_id), + self, caps.alignments.clone(), caps.downlevel.clone(), desc, @@ -312,8 +311,7 @@ impl Adapter { } fn create_device( - &self, - self_id: AdapterId, + self: &Arc, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, ) -> Result, RequestDeviceError> { @@ -364,7 +362,7 @@ impl Adapter { }, )?; - self.create_device_from_hal(self_id, open, desc, trace_path) + self.create_device_from_hal(open, desc, trace_path) } } @@ -1060,7 +1058,7 @@ impl Global { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = match adapter.create_device(adapter_id, desc, trace_path) { + let device = match adapter.create_device(desc, trace_path) { Ok(device) => device, Err(e) => break e, }; @@ -1095,11 +1093,10 @@ impl Global { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = - match adapter.create_device_from_hal(adapter_id, hal_device, desc, trace_path) { - Ok(device) => device, - Err(e) => break e, - }; + let device = match adapter.create_device_from_hal(hal_device, desc, trace_path) { + Ok(device) => device, + Err(e) => break e, + }; let (id, _) = fid.assign(device); log::info!("Created Device {:?}", id); return (id.0, None); From f9acbd3a004120c0aab02d4f041f137688ef8b28 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Mon, 5 Jun 2023 22:36:13 +0200 Subject: [PATCH 048/132] Removing useless typo code --- wgpu-core/src/command/compute.rs | 9 ++++++--- wgpu-core/src/command/mod.rs | 22 ---------------------- wgpu-core/src/command/query.rs | 8 ++++++-- wgpu-core/src/command/render.rs | 14 +++++++++++--- wgpu-core/src/command/transfer.rs | 20 +++++++++++++++----- 5 files changed, 38 insertions(+), 35 deletions(-) diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs 
index 7f3540eafc..8296c7ce86 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -349,10 +349,13 @@ impl Global { }); } - // will be reset to true if recording is done without errors - let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let status = &mut cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + // will be reset to true if recording is done without errors *status = CommandEncoderStatus::Error; let raw = encoder.open(); let device = &cmd_buf.device; diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 51b2845d2e..3dc1f57080 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -118,28 +118,6 @@ impl CommandBufferMutable { let tracker = &mut self.trackers; (encoder, tracker) } - pub(crate) fn raw_mut( - &mut self, - ) -> ( - &mut CommandEncoder, - &mut CommandEncoderStatus, - &mut Tracker, - &mut Vec, - &mut CommandBufferTextureMemoryActions, - ) { - let encoder = &mut self.encoder; - let status = &mut self.status; - let tracker = &mut self.trackers; - let buffer_memory_init_actions = &mut self.buffer_memory_init_actions; - let texture_memory_actions = &mut self.texture_memory_actions; - ( - encoder, - status, - tracker, - buffer_memory_init_actions, - texture_memory_actions, - ) - } } pub struct CommandBuffer { diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index 3834e39775..c58260c5dd 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -312,7 +312,9 @@ impl Global { }); } - let (encoder, _, tracker, _, _) = cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let raw_encoder = encoder.open(); let query_set_guard = hub.query_sets.read(); @@ -352,7 +354,9 @@ impl Global { }); } - let (encoder, _, tracker, buffer_memory_init_actions, _) = cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; let raw_encoder = encoder.open(); if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 75646ad9fb..85cd58e901 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1230,8 +1230,12 @@ impl Global { }); } - let (encoder, status, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let status = &mut cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + // close everything while the new command encoder is filled encoder.close(); // will be reset to true if recording is done without errors @@ -2144,7 +2148,11 @@ impl Global { let cmd_buf = hub.command_buffers.get(encoder_id).unwrap(); let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let (encoder, status, tracker, _, _) = cmd_buf_data.raw_mut(); + + let encoder = &mut cmd_buf_data.encoder; + let status = &mut 
cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; + { let transit = encoder.open(); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 096e2f0829..3934d55b35 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -732,8 +732,12 @@ impl Global { size: *copy_size, }); } - let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); + + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + let texture_guard = hub.textures.read(); let device = &cmd_buf.device; @@ -885,8 +889,11 @@ impl Global { size: *copy_size, }); } - let (encoder, _, tracker, buffer_memory_init_actions, texture_memory_actions) = - cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + let texture_guard = hub.textures.read(); @@ -1056,7 +1063,10 @@ impl Global { size: *copy_size, }); } - let (encoder, _, tracker, _, texture_memory_actions) = cmd_buf_data.raw_mut(); + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + let texture_guard = hub.textures.read(); let device = &cmd_buf.device; From a3f46435bc4b626964f49835ae506d43b44a254b Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Mon, 5 Jun 2023 22:53:51 +0200 Subject: [PATCH 049/132] Fix merge conflicts errors --- wgpu-core/src/command/compute.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 6d2c5f34a0..abd3d5ab40 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -758,7 +758,7 @@ impl Global { unsafe { raw.end_compute_pass(); } - + // We've successfully recorded the compute pass, bring the // command buffer out of the error state. *status = CommandEncoderStatus::Recording; @@ -769,7 +769,7 @@ impl Global { // Create a new command buffer, which we will insert _before_ the body of the compute pass. // // Use that buffer to insert barriers and clear discarded images. - let transit = cmd_buf.encoder.open(); + let transit = encoder.open(); fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, @@ -779,13 +779,13 @@ impl Global { ); CommandBuffer::insert_barriers_from_tracker( transit, - &mut cmd_buf.trackers, + tracker, &intermediate_trackers, &*buffer_guard, &*texture_guard, ); // Close the command buffer, and swap it with the previous. 
- cmd_buf.encoder.close_and_swap(); + encoder.close_and_swap(); Ok(()) } From bfca2cd9fe0a4cd20b918e08b838438fc054b28d Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Mon, 5 Jun 2023 23:01:42 +0200 Subject: [PATCH 050/132] Fix format --- wgpu-core/src/command/render.rs | 5 ++--- wgpu-core/src/command/transfer.rs | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 88d8532f7f..ed07b50560 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1217,7 +1217,6 @@ impl Global { let hub = A::hub(self); let (scope, query_reset_state, pending_discard_init_fixups) = { - let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -1236,7 +1235,7 @@ impl Global { let tracker = &mut cmd_buf_data.trackers; let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; - + // We automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, // we need to make sure to close the previous one. @@ -2151,7 +2150,7 @@ impl Global { let cmd_buf = hub.command_buffers.get(encoder_id).unwrap(); let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - + let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; let tracker = &mut cmd_buf_data.trackers; diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 3934d55b35..0227bc712b 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -732,7 +732,7 @@ impl Global { size: *copy_size, }); } - + let encoder = &mut cmd_buf_data.encoder; let tracker = &mut cmd_buf_data.trackers; let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; @@ -894,7 +894,6 @@ impl Global { let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; - let texture_guard = hub.textures.read(); let device = &cmd_buf.device; From 8850c8989dd82538984fec5f258c50cce51445a0 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Tue, 6 Jun 2023 21:23:34 +0200 Subject: [PATCH 051/132] Removing wait of specific submission index --- wgpu/examples/hello-compute/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgpu/examples/hello-compute/main.rs b/wgpu/examples/hello-compute/main.rs index dc32686885..afdf7744c9 100644 --- a/wgpu/examples/hello-compute/main.rs +++ b/wgpu/examples/hello-compute/main.rs @@ -143,7 +143,7 @@ async fn execute_gpu_inner( encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size); // Submits command encoder for processing - let submission_index = queue.submit(Some(encoder.finish())); + queue.submit(Some(encoder.finish())); // Note that we're not calling `.await` here. let buffer_slice = staging_buffer.slice(..); @@ -154,7 +154,7 @@ async fn execute_gpu_inner( // Poll the device in a blocking manner so that our future resolves. // In an actual application, `device.poll(...)` should // be called in an event loop or on another thread. 
- device.poll(wgpu::Maintain::WaitForSubmissionIndex(submission_index)); + device.poll(wgpu::Maintain::Wait); // Awaits until `buffer_future` can be read from if let Some(Ok(())) = receiver.receive().await { From 3a6fdc74a05f529c101b9a7089d25eea1087e32b Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Tue, 6 Jun 2023 22:02:56 +0200 Subject: [PATCH 052/132] rustup update --- Cargo.lock | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93cfc3298a..9d767d2800 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1406,7 +1406,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 1.0.105", + "syn 1.0.109", ] [[package]] @@ -2620,7 +2620,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.15", ] [[package]] @@ -3313,21 +3313,6 @@ dependencies = [ "windows_x86_64_msvc 0.36.1", ] -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" From e0e2297f0b612b97accab718b66173334cd424e5 Mon Sep 17 00:00:00 2001 From: Jim Blandy Date: Tue, 30 May 2023 21:59:05 -0700 Subject: [PATCH 053/132] Document abandoned buffer conditions more explicitly. --- wgpu-core/src/device/life.rs | 12 ++++++------ wgpu-core/src/track/buffer.rs | 25 +++++++++++++++++++------ wgpu-core/src/track/metadata.rs | 2 +- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index d8d9d84d28..bd3ddd37b8 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -242,12 +242,12 @@ pub enum WaitIdleError { /// A buffer cannot be mapped until all active queue submissions that use it /// have completed. To that end: /// -/// - Each buffer's `LifeGuard::submission_index` records the index of the +/// - Each buffer's `ResourceInfo::submission_index` records the index of the /// most recent queue submission that uses that buffer. /// -/// - Calling `map_async` adds the buffer to `self.mapped`, and changes -/// `Buffer::map_state` to prevent it from being used in any new -/// submissions. +/// - Calling `Global::buffer_map_async` adds the buffer to +/// `self.mapped`, and changes `Buffer::map_state` to prevent it +/// from being used in any new submissions. /// /// - When the device is polled, the following `LifetimeTracker` methods decide /// what should happen next: @@ -270,8 +270,8 @@ pub enum WaitIdleError { /// /// 4) `cleanup` frees everything in `free_resources`. /// -/// Only `self.mapped` holds a `RefCount` for the buffer; it is dropped by -/// `triage_mapped`. +/// Only calling `Global::buffer_map_async` clones a new `Arc` for the +/// buffer. This new `Arc` is only dropped by `handle_mapping`. pub(crate) struct LifetimeTracker { /// Resources that the user has requested be mapped, but which are used by /// queue submissions still in flight. 
diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 6cd2518d7f..a137db8d6c 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -518,13 +518,27 @@ impl BufferTracker { } } - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. + /// Removes the buffer `id` from this tracker if it is otherwise unused. /// - /// Returns true if the resource was removed. + /// A buffer is 'otherwise unused' when the only references to it are: /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. + /// 1) the `Arc` that our caller, `LifetimeTracker::triage_suspected`, has just + /// drained from `LifetimeTracker::suspected_resources`, + /// + /// 2) its `Arc` in [`self.metadata`] (owned by [`Device::trackers`]), and + /// + /// 3) its `Arc` in the [`Hub::buffers`] registry. + /// + /// If the buffer is indeed unused, this function removes 2), and + /// `triage_suspected` will remove 3), leaving 1) as the sole + /// remaining reference. + /// + /// Return `true` if this tracker contained the buffer `id`. This + /// implies that we removed it. + /// + /// [`Device::trackers`]: crate::device::Device + /// [`self.metadata`]: BufferTracker::metadata + /// [`Hub::buffers`]: crate::hub::Hub::buffers pub fn remove_abandoned(&mut self, id: Valid) -> bool { let index = id.0.unzip().0 as usize; @@ -537,7 +551,6 @@ impl BufferTracker { unsafe { if self.metadata.contains_unchecked(index) { let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //3 ref count: Registry, Device Tracker and suspected resource itself if existing_ref_count <= 3 { self.metadata.remove(index); return true; diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 0c64abe687..4c429a4cd2 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -114,7 +114,7 @@ impl> ResourceMetadata { } } - /// Get the [`RefCount`] of the resource with the given index. + /// Get the reference count of the resource with the given index. 
/// /// # Safety /// From 8bdc4b95415e8fe75a6598913a082458a55b29cd Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 10 Jun 2023 12:26:13 +0200 Subject: [PATCH 054/132] Fixing CI --- .github/workflows/ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81f5a9ad7e..7e432aca4f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,6 @@ on: env: CARGO_INCREMENTAL: false CARGO_TERM_COLOR: always - RUST_LOG: info RUST_BACKTRACE: full RUST_LOG: info #needed to understand what's going on when tests fail MSRV: 1.65 From dcf751b5bd07c09de200df02cb2c8136e7d851b5 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 17 Jun 2023 10:27:56 +0200 Subject: [PATCH 055/132] Move trackers locks after the pending_writes lock --- wgpu-core/src/device/life.rs | 80 ++++++++++++++++++++------------ wgpu-core/src/device/queue.rs | 51 ++++++++++---------- wgpu-core/src/device/resource.rs | 3 ++ wgpu-core/src/hub.rs | 2 +- 4 files changed, 82 insertions(+), 54 deletions(-) diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index bd3ddd37b8..085fb27ec4 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -508,11 +508,13 @@ impl LifetimeTracker { profiling::scope!("triage_suspected"); if !self.suspected_resources.render_bundles.is_empty() { - let mut trackers = trackers.lock(); - while let Some(bundle) = self.suspected_resources.render_bundles.pop() { let id = bundle.info.id(); - if trackers.bundles.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.bundles.remove_abandoned(id) + }; + if is_removed { log::info!("Bundle {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -527,11 +529,13 @@ impl LifetimeTracker { } if !self.suspected_resources.bind_groups.is_empty() { - let mut trackers = trackers.lock(); - while let Some(resource) = self.suspected_resources.bind_groups.pop() { let id = resource.info.id(); - if trackers.bind_groups.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.bind_groups.remove_abandoned(id) + }; + if is_removed { log::info!("BindGroup {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -559,12 +563,14 @@ impl LifetimeTracker { } if !self.suspected_resources.texture_views.is_empty() { - let mut trackers = trackers.lock(); - let mut list = mem::take(&mut self.suspected_resources.texture_views); for texture_view in list.drain(..) { let id = texture_view.info.id(); - if trackers.views.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.views.remove_abandoned(id) + }; + if is_removed { log::info!("TextureView {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -591,11 +597,13 @@ impl LifetimeTracker { } if !self.suspected_resources.textures.is_empty() { - let mut trackers = trackers.lock(); - for texture in self.suspected_resources.textures.drain(..) 
{ let id = texture.info.id(); - if trackers.textures.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.textures.remove_abandoned(id) + }; + if is_removed { log::info!("Texture {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -625,11 +633,13 @@ impl LifetimeTracker { } if !self.suspected_resources.samplers.is_empty() { - let mut trackers = trackers.lock(); - for sampler in self.suspected_resources.samplers.drain(..) { let id = sampler.info.id(); - if trackers.samplers.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.samplers.remove_abandoned(id) + }; + if is_removed { log::info!("Sampler {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -650,11 +660,13 @@ impl LifetimeTracker { } if !self.suspected_resources.buffers.is_empty() { - let mut trackers = trackers.lock(); - for buffer in self.suspected_resources.buffers.drain(..) { let id = buffer.info.id(); - if trackers.buffers.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.buffers.remove_abandoned(id) + }; + if is_removed { log::info!("Buffer {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -681,11 +693,13 @@ impl LifetimeTracker { } if !self.suspected_resources.compute_pipelines.is_empty() { - let mut trackers = trackers.lock(); - for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) { let id = compute_pipeline.info.id(); - if trackers.compute_pipelines.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.compute_pipelines.remove_abandoned(id) + }; + if is_removed { log::info!("ComputePipeline {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -706,11 +720,13 @@ impl LifetimeTracker { } if !self.suspected_resources.render_pipelines.is_empty() { - let mut trackers = trackers.lock(); - for render_pipeline in self.suspected_resources.render_pipelines.drain(..) { let id = render_pipeline.info.id(); - if trackers.render_pipelines.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.render_pipelines.remove_abandoned(id) + }; + if is_removed { log::info!("RenderPipeline {:?} is removed from registry", id); #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -783,11 +799,13 @@ impl LifetimeTracker { } if !self.suspected_resources.query_sets.is_empty() { - let mut trackers = trackers.lock(); - for query_set in self.suspected_resources.query_sets.drain(..) { let id = query_set.info.id(); - if trackers.query_sets.remove_abandoned(id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.query_sets.remove_abandoned(id) + }; + if is_removed { log::info!("QuerySet {:?} is removed from registry", id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); @@ -848,10 +866,14 @@ impl LifetimeTracker { } let mut pending_callbacks: Vec = Vec::with_capacity(self.ready_to_map.len()); - let mut trackers = trackers.lock(); + for buffer in self.ready_to_map.drain(..) 
{ let buffer_id = buffer.info.id(); - if trackers.buffers.remove_abandoned(buffer_id) { + let is_removed = { + let mut trackers = trackers.lock(); + trackers.buffers.remove_abandoned(buffer_id) + }; + if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::info!("Buffer {:?} is removed from registry", buffer_id); if let Some(buf) = hub.buffers.unregister(buffer_id.0) { diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index df2d1cda55..05a7feeaff 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -146,6 +146,10 @@ impl EncoderInFlight { /// time the user submits a wgpu command buffer, ahead of the user's /// commands. /// +/// Important: +/// When locking pending_writes be sure that tracker is not locked +/// and try to lock trackers for the minimum timespan possible +/// /// All uses of [`StagingBuffer`]s end up here. #[derive(Debug)] pub(crate) struct PendingWrites { @@ -545,9 +549,9 @@ impl Global { ) -> Result<(), QueueWriteError> { let hub = A::hub(self); - let mut trackers = device.trackers.lock(); let (dst, transition) = { let buffer_guard = hub.buffers.read(); + let mut trackers = device.trackers.lock(); trackers .buffers .set_single(&buffer_guard, buffer_id, hal::BufferUses::COPY_DST) @@ -706,7 +710,6 @@ impl Global { (size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks; let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; - let mut trackers = device.trackers.lock(); let mut pending_writes = device.pending_writes.write(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); @@ -732,6 +735,7 @@ impl Global { .drain(init_layer_range) .collect::>>() { + let mut trackers = device.trackers.lock(); let texture_guard = hub.textures.read(); crate::command::clear_texture( &*texture_guard, @@ -758,16 +762,6 @@ impl Global { // call above. Since we've held `texture_guard` the whole time, we know // the texture hasn't gone away in the mean time, so we can unwrap. 
let dst = hub.textures.get(destination.texture).unwrap(); - let transition = trackers - .textures - .set_single( - &dst, - destination.texture, - selector, - hal::TextureUses::COPY_DST, - ) - .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.info .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); @@ -848,6 +842,16 @@ impl Global { usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }; + let mut trackers = device.trackers.lock(); + let transition = trackers + .textures + .set_single( + &dst, + destination.texture, + selector, + hal::TextureUses::COPY_DST, + ) + .ok_or(TransferError::InvalidTexture(destination.texture))?; unsafe { encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); encoder.transition_buffers(iter::once(barrier)); @@ -979,7 +983,6 @@ impl Global { let (selector, dst_base) = extract_texture_selector(&destination.to_untagged(), &size, &dst)?; - let mut trackers = device.trackers.lock(); let mut pending_writes = device.pending_writes.write(); let encoder = pending_writes.as_mut().unwrap().activate(); @@ -1004,6 +1007,7 @@ impl Global { .drain(init_layer_range) .collect::>>() { + let mut trackers = device.trackers.lock(); crate::command::clear_texture( &*texture_guard, id::Valid(destination.texture), @@ -1023,17 +1027,6 @@ impl Global { .drain(init_layer_range); } } - - let transitions = trackers - .textures - .set_single( - &dst, - destination.texture, - selector, - hal::TextureUses::COPY_DST, - ) - .ok_or(TransferError::InvalidTexture(destination.texture))?; - dst.info .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); @@ -1056,6 +1049,16 @@ impl Global { }; unsafe { + let mut trackers = device.trackers.lock(); + let transitions = trackers + .textures + .set_single( + &dst, + destination.texture, + selector, + hal::TextureUses::COPY_DST, + ) + .ok_or(TransferError::InvalidTexture(destination.texture))?; encoder.transition_textures(transitions.map(|pending| pending.into_hal(&dst))); encoder.copy_external_image_to_texture( source, diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index cb9e0e0bec..56f5f20d08 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -74,6 +74,9 @@ use super::{ /// triage_suspected locks Device::trackers, and calls... /// Registry::unregister locks Registry::storage /// +/// Important: +/// When locking pending_writes please check that trackers is not locked +/// trackers should be locked only when needed for the shortest time possible pub struct Device { raw: Option, pub(crate) adapter: Arc>, diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 214d521b1e..39af2ef086 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -163,7 +163,7 @@ use crate::{ storage::{Element, Storage, StorageReport}, }; -use std::{fmt::Debug, marker::PhantomData}; +use std::fmt::Debug; #[derive(Debug)] pub struct HubReport { From e21960417bd907120ddd2bdea62e0f4905a6be90 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 17 Jun 2023 10:50:52 +0200 Subject: [PATCH 056/132] Adding additional doc --- wgpu-core/src/device/resource.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 56f5f20d08..6eee7060f5 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -92,6 +92,7 @@ pub struct Device { /// All live resources allocated with this [`Device`]. 
/// /// Has to be locked temporarily only (locked last) + /// and never before pending_writes pub(crate) trackers: Mutex>, // Life tracker should be locked right after the device and before anything else. life_tracker: Mutex>, From 2ce35f15d819a6f491759c2d765c2dc013998b78 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 17 Jun 2023 12:01:12 +0200 Subject: [PATCH 057/132] Adding deadlock detection in tests --- Cargo.lock | 37 ++++++++++++++++++++++++++++++++++--- tests/Cargo.toml | 1 + tests/src/lib.rs | 32 +++++++++++++++++++++++++++++++- 3 files changed, 66 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b3e44c544..265c09ebc1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -782,6 +782,12 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1533,7 +1539,7 @@ dependencies = [ "indexmap", "log", "num-traits 0.2.15", - "petgraph", + "petgraph 0.6.3", "pp-rs", "rustc-hash", "serde", @@ -1840,11 +1846,14 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ + "backtrace", "cfg-if", "instant", "libc", + "petgraph 0.5.1", "redox_syscall 0.2.16", "smallvec", + "thread-id", "winapi", ] @@ -1873,13 +1882,23 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +[[package]] +name = "petgraph" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +dependencies = [ + "fixedbitset 0.2.0", + "indexmap", +] + [[package]] name = "petgraph" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ - "fixedbitset", + "fixedbitset 0.4.2", "indexmap", ] @@ -2517,6 +2536,17 @@ dependencies = [ "syn 2.0.18", ] +[[package]] +name = "thread-id" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee93aa2b8331c0fec9091548843f2c90019571814057da3b783f9de09349d73" +dependencies = [ + "libc", + "redox_syscall 0.2.16", + "winapi", +] + [[package]] name = "tiny-skia" version = "0.7.0" @@ -3286,6 +3316,7 @@ dependencies = [ "log", "naga", "nv-flip", + "parking_lot 0.11.2", "png", "pollster", "raw-window-handle 0.5.2", @@ -3633,4 +3664,4 @@ dependencies = [ name = "xml-rs" version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" \ No newline at end of file +checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 5ab713ddd9..ceb67ebf1d 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -30,6 +30,7 @@ wgt.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] nv-flip.workspace = true +parking_lot = { workspace = true, features = ["deadlock_detection"] } [target.'cfg(target_arch = "wasm32")'.dependencies] console_log.workspace = true diff --git a/tests/src/lib.rs b/tests/src/lib.rs index b8a0fb2443..274b8b1936 
100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,7 +1,10 @@ //! This module contains common test-only code that needs to be shared between the examples and the tests. #![allow(dead_code)] // This module is used in a lot of contexts and only parts of it will be used -use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::{ + panic::{catch_unwind, AssertUnwindSafe}, + sync::{atomic::AtomicBool, Arc}, +}; use wgpu::{Adapter, Device, DownlevelFlags, Instance, Queue, Surface}; use wgt::{Backends, DeviceDescriptor, DownlevelCapabilities, Features, Limits}; @@ -295,7 +298,34 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te return; } + let is_running = Arc::new(AtomicBool::new(true)); + #[cfg(not(target_arch = "wasm32"))] + { + let is_running = is_running.clone(); + // Create a background thread which checks for deadlocks every 3 seconds + std::thread::spawn(move || loop { + if is_running.load(std::sync::atomic::Ordering::Relaxed) { + break; + } + std::thread::sleep(core::time::Duration::from_secs(3)); + let deadlocks = parking_lot::deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } + is_running.store(false, std::sync::atomic::Ordering::Relaxed); + println!("{} deadlocks detected", deadlocks.len()); + for (i, threads) in deadlocks.iter().enumerate() { + println!("Deadlock #{}", i); + for t in threads { + println!("Thread Id {:#?}", t.thread_id()); + println!("{:#?}", t.backtrace()); + } + } + }); + } + let panicked = catch_unwind(AssertUnwindSafe(|| test_function(context))).is_err(); + is_running.store(false, std::sync::atomic::Ordering::Relaxed); cfg_if::cfg_if!( if #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] { let canary_set = wgpu::hal::VALIDATION_CANARY.get_and_reset(); From c6e9de8dd1c938ecf0bf4ee6dd44b75336b8c9f1 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 17 Jun 2023 12:21:25 +0200 Subject: [PATCH 058/132] Fixed wrong check --- tests/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 274b8b1936..ca94e1c92c 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -304,7 +304,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te let is_running = is_running.clone(); // Create a background thread which checks for deadlocks every 3 seconds std::thread::spawn(move || loop { - if is_running.load(std::sync::atomic::Ordering::Relaxed) { + if !is_running.load(std::sync::atomic::Ordering::Relaxed) { break; } std::thread::sleep(core::time::Duration::from_secs(3)); From 7469572bcb27cae563ebb6ad379f12203d1bf631 Mon Sep 17 00:00:00 2001 From: Mauro Gentile <62186646+gents83@users.noreply.github.com> Date: Sat, 24 Jun 2023 11:57:07 +0200 Subject: [PATCH 059/132] Updating changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94acb283f6..adb0414f04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,8 @@ Bottom level categories: #### Misc Breaking Changes - Change `AdapterInfo::{device,vendor}` to be `u32` instead of `usize`. 
By @ameknite in [#3760](https://github.com/gfx-rs/wgpu/pull/3760) +- Arcanization of wgpu core resources: removed 'Token' and 'LifeTime' related management and +removed 'RefCount' and 'MultiRefCount' in favour of plain internal 'Arc' reference counting; resources no longer require '&mut' and instead use internal member locks or atomic operations on demand; resources now implement Drop and release their raw resources when the last 'Arc' is dropped; resources hold an 'Arc' so that they can implement Drop, and provide a utility to retrieve their own id; all guards were removed, and the needed 'Arc' is retrieved on demand so that registries are unlocked as soon as possible, removing locking from hot paths. By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626), and thanks also to @jimblandy #### DX12 From f2ca336464e0c7f06309f466ca869c1659d7289f Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Jul 2023 17:25:04 +0200 Subject: [PATCH 060/132] Using mutex on pending_writes --- Cargo.lock | 2 +- wgpu-core/src/device/global.rs | 16 +++-- wgpu-core/src/device/queue.rs | 112 +++++++++++-------------------- wgpu-core/src/device/resource.rs | 8 +-- wgpu-core/src/resource.rs | 4 +- 5 files changed, 58 insertions(+), 84 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc1e88a36f..220ca4896a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1471,7 +1471,7 @@ name = "metal" version = "0.25.0" source = "git+https://github.com/gfx-rs/metal-rs.git?rev=a6a0446#a6a04463db388e8fd3e99095ab4fbb87cbe9d69c" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.2", "block", "core-graphics-types", "foreign-types 0.5.0", diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 2d215fdfd2..b7439e082a 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -470,7 +470,7 @@ impl Global { } let temp = queue::TempResource::Buffer(buffer.clone()); - let mut pending_writes = device.pending_writes.write(); + let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); if pending_writes.dst_buffers.contains_key(&buffer_id) { pending_writes.temp_resources.push(temp); @@ -514,7 +514,7 @@ impl Global { { if device .pending_writes - .read() + .lock() .as_ref() .unwrap() .dst_buffers @@ -751,7 +751,7 @@ impl Global { resource::TextureInner::Native { ref raw } => { if !raw.is_none() { let temp = queue::TempResource::Texture(texture.clone(), clear_views); - let mut pending_writes = device.pending_writes.write(); + let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); if pending_writes.dst_textures.contains_key(&texture_id) { pending_writes.temp_resources.push(temp); @@ -798,7 +798,7 @@ impl Global { let mut life_lock = device.lock_life(); if device .pending_writes - .read() + .lock() .as_ref() .unwrap() .dst_textures @@ -2231,7 +2231,11 @@ impl Global { // need to wait for submissions or triage them. We know we were // just polled, so `life_tracker.free_resources` is empty. debug_assert!(device.lock_life().queue_empty()); - device.pending_writes.write().as_mut().unwrap().deactivate(); + { + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + pending_writes.deactivate(); + } // Adapter is only referenced by the device and itself. // This isn't a robust way to destroy them, we should find a better one. 
@@ -2485,7 +2489,7 @@ impl Global { buffer: raw_buf, usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, }; - let mut pending_writes = device.pending_writes.write(); + let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); unsafe { diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index c1e993e824..a23306843b 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -401,34 +401,27 @@ impl Global { // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(&device, data_size)?; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); if let Err(flush_error) = unsafe { profiling::scope!("copy"); ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); staging_buffer.flush(device.raw()) } { - device - .pending_writes - .write() - .as_mut() - .unwrap() - .consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(&device, Arc::new(staging_buffer)); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( &device, + pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - device - .pending_writes - .write() - .as_mut() - .unwrap() - .consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(&device, Arc::new(staging_buffer)); result } @@ -478,34 +471,27 @@ impl Global { ))); } let staging_buffer = staging_buffer.unwrap(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); // At this point, we have taken ownership of the staging_buffer from the // user. Platform validation requires that the staging buffer always // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } { - device - .pending_writes - .write() - .as_mut() - .unwrap() - .consume(&device, staging_buffer); + pending_writes.consume(&device, staging_buffer); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( &device, + pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - device - .pending_writes - .write() - .as_mut() - .unwrap() - .consume(&device, staging_buffer); + pending_writes.consume(&device, staging_buffer); result } @@ -563,6 +549,7 @@ impl Global { fn queue_write_staging_buffer_impl( &self, device: &Device, + pending_writes: &mut PendingWrites, staging_buffer: &StagingBuffer, buffer_id: id::BufferId, buffer_offset: u64, @@ -599,8 +586,6 @@ impl Global { usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }) .chain(transition.map(|pending| pending.into_hal(&dst))); - let mut pending_writes = device.pending_writes.write(); - let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); unsafe { encoder.transition_buffers(barriers); @@ -730,7 +715,7 @@ impl Global { (size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks; let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; - let mut pending_writes = device.pending_writes.write(); + let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); let encoder = pending_writes.activate(); @@ -1003,7 +988,7 @@ impl Global { let (selector, dst_base) = extract_texture_selector(&destination.to_untagged(), &size, &dst)?; - let mut pending_writes = device.pending_writes.write(); + let mut pending_writes = device.pending_writes.lock(); let encoder = pending_writes.as_mut().unwrap().activate(); // If the copy does not fully cover the layers, we need to initialize to @@ -1382,16 +1367,11 @@ impl Global { { let texture_guard = hub.textures.read(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); used_surface_textures.set_size(texture_guard.len()); - for (&id, texture) in device - .pending_writes - .read() - .as_ref() - .unwrap() - .dst_textures - .iter() - { + for (&id, texture) in pending_writes.dst_textures.iter() { match *texture.inner.as_ref().unwrap() { TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id)); @@ -1425,53 +1405,43 @@ impl Global { }); unsafe { - device - .pending_writes - .write() - .as_mut() - .unwrap() + pending_writes .command_encoder .transition_textures(texture_barriers); }; } } + } - { - let mut pending_writes = device.pending_writes.write(); - let pending_writes = pending_writes.as_mut().unwrap(); - let refs = pending_writes - .pre_submit() - .into_iter() - .chain( - active_executions - .iter() - .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), - ) - .collect::>(); - unsafe { - device - .queue - .as_ref() - .unwrap() - .submit(&refs, Some((fence, submit_index))) - .map_err(DeviceError::from)?; - } - } + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let refs = pending_writes + .pre_submit() + .into_iter() + .chain( + active_executions + .iter() + .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), + ) + .collect::>(); + unsafe { + device + .queue + .as_ref() + .unwrap() + .submit(&refs, Some((fence, submit_index))) + .map_err(DeviceError::from)?; } profiling::scope!("cleanup"); - if let 
Some(pending_execution) = - device.pending_writes.write().as_mut().unwrap().post_submit( - device.command_allocator.lock().as_mut().unwrap(), - device.raw(), - device.queue.as_ref().unwrap(), - ) - { + if let Some(pending_execution) = pending_writes.post_submit( + device.command_allocator.lock().as_mut().unwrap(), + device.raw(), + device.queue.as_ref().unwrap(), + ) { active_executions.push(pending_execution); } - let mut pending_writes = device.pending_writes.write(); - let pending_writes = pending_writes.as_mut().unwrap(); // this will register the new submission to the life time tracker let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources); device.lock_life().track_submission( diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 6eee7060f5..b96b5dfab6 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -103,7 +103,7 @@ pub struct Device { pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, pub(crate) downlevel: wgt::DownlevelCapabilities, - pub(crate) pending_writes: RwLock>>, + pub(crate) pending_writes: Mutex>>, #[cfg(feature = "trace")] pub(crate) trace: Mutex>, } @@ -123,7 +123,7 @@ impl Drop for Device { fn drop(&mut self) { log::info!("Destroying Device {:?}", self.info.label()); let raw = self.raw.take().unwrap(); - let pending_writes = self.pending_writes.write().take().unwrap(); + let pending_writes = self.pending_writes.lock().take().unwrap(); pending_writes.dispose(&raw); self.command_allocator.lock().take().unwrap().dispose(&raw); unsafe { @@ -250,7 +250,7 @@ impl Device { limits: desc.limits.clone(), features: desc.features, downlevel, - pending_writes: RwLock::new(Some(pending_writes)), + pending_writes: Mutex::new(Some(pending_writes)), }) } @@ -3083,7 +3083,7 @@ impl Device { /// Wait for idle and remove resources that we can, before we die. 
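// Illustrative sketch (not part of the patch): the field change above turns
// `pending_writes` from an RwLock into a Mutex because every access goes
// through `lock()` followed by `as_mut()` or `take()`; there is no read-only
// path, so a reader/writer lock adds nothing. `PendingWrites` is a stand-in
// type, and `parking_lot` is the lock crate this code already uses.
use parking_lot::Mutex;

struct PendingWrites;

impl PendingWrites {
    fn dispose(self) {
        // Free the command encoder and any temporary resources here.
    }
}

struct Device {
    pending_writes: Mutex<Option<PendingWrites>>,
}

impl Drop for Device {
    fn drop(&mut self) {
        // On drop the pending writes are taken out and disposed of exactly once.
        if let Some(writes) = self.pending_writes.lock().take() {
            writes.dispose();
        }
    }
}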
pub(crate) fn prepare_to_die(&self) { - self.pending_writes.write().as_mut().unwrap().deactivate(); + self.pending_writes.lock().as_mut().unwrap().deactivate(); let mut life_tracker = self.lock_life(); let current_index = self.active_submission_index.load(Ordering::Relaxed); if let Err(error) = unsafe { diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 11a1f28899..a2b2f33438 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -189,7 +189,7 @@ pub(crate) enum BufferMapState { not(target_feature = "atomics") ) ))] -unsafe impl Send for BufferMapState {} +unsafe impl Send for BufferMapState {} #[cfg(any( not(target_arch = "wasm32"), all( @@ -197,7 +197,7 @@ unsafe impl Send for BufferMapState {} not(target_feature = "atomics") ) ))] -unsafe impl Sync for BufferMapState {} +unsafe impl Sync for BufferMapState {} #[repr(C)] pub struct BufferMapCallbackC { From 9243a926dd79b96bdc82d95ac49d5feafa4f0676 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Fri, 7 Jul 2023 18:46:51 +0200 Subject: [PATCH 061/132] Pending writes locked before textures transitions --- wgpu-core/src/device/queue.rs | 83 +++++++++++++++++------------------ 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index a23306843b..de2babf414 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1364,57 +1364,56 @@ impl Global { log::trace!("Device after submission {}", submit_index); } + } - { - let texture_guard = hub.textures.read(); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - - used_surface_textures.set_size(texture_guard.len()); - for (&id, texture) in pending_writes.dst_textures.iter() { - match *texture.inner.as_ref().unwrap() { - TextureInner::Native { raw: None } => { - return Err(QueueSubmitError::DestroyedTexture(id)); - } - TextureInner::Native { raw: Some(_) } => {} - TextureInner::Surface { ref has_work, .. } => { - has_work.store(true, Ordering::Relaxed); - unsafe { - used_surface_textures - .merge_single( - &*texture_guard, - id::Valid(id), - None, - hal::TextureUses::PRESENT, - ) - .unwrap() - }; - } + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + + { + let texture_guard = hub.textures.read(); + + used_surface_textures.set_size(texture_guard.len()); + for (&id, texture) in pending_writes.dst_textures.iter() { + match *texture.inner.as_ref().unwrap() { + TextureInner::Native { raw: None } => { + return Err(QueueSubmitError::DestroyedTexture(id)); + } + TextureInner::Native { raw: Some(_) } => {} + TextureInner::Surface { ref has_work, .. 
} => { + has_work.store(true, Ordering::Relaxed); + unsafe { + used_surface_textures + .merge_single( + &*texture_guard, + id::Valid(id), + None, + hal::TextureUses::PRESENT, + ) + .unwrap() + }; } } + } - if !used_surface_textures.is_empty() { - let mut trackers = device.trackers.lock(); + if !used_surface_textures.is_empty() { + let mut trackers = device.trackers.lock(); - trackers - .textures - .set_from_usage_scope(&*texture_guard, &used_surface_textures); - let texture_barriers = trackers.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + trackers + .textures + .set_from_usage_scope(&*texture_guard, &used_surface_textures); + let texture_barriers = trackers.textures.drain().map(|pending| { + let tex = unsafe { texture_guard.get_unchecked(pending.id) }; + pending.into_hal(tex) + }); - unsafe { - pending_writes - .command_encoder - .transition_textures(texture_barriers); - }; - } + unsafe { + pending_writes + .command_encoder + .transition_textures(texture_barriers); + }; } } - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); let refs = pending_writes .pre_submit() .into_iter() From 52cf370c855a68103c28ecca80fc506da0526989 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Jul 2023 10:59:24 +0200 Subject: [PATCH 062/132] Fixing merge integration resolve conflicts --- wgpu-core/src/device/global.rs | 22 +++++++--------------- wgpu-core/src/device/resource.rs | 19 ++++++++----------- 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index c9c614406e..16c2620ecf 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -732,11 +732,10 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = A::hub(self); - let mut token = Token::root(); let fid = hub.buffers.prepare(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { + let device_guard = hub.devices.read(); let device = match device_guard.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), @@ -745,32 +744,25 @@ impl Global { // NB: Any change done through the raw buffer handle will not be // recorded in the replay #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBuffer(fid.id(), desc.clone())); + if let Some(trace) = device.trace.lock().as_mut() { + trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); } - let mut buffer = device.create_buffer_from_hal(hal_buffer, device_id, desc); - - // Assume external buffers are initialized - buffer.initialization_status = crate::init_tracker::BufferInitTracker::new(0); + let buffer = device.create_buffer_from_hal(hal_buffer, device_id, desc); - let ref_count = buffer.life_guard.add_ref(); - - let id = fid.assign(buffer, &mut token); + let (id, buffer) = fid.assign(buffer); log::info!("Created buffer {:?} with {:?}", id, desc); device .trackers .lock() .buffers - .insert_single(id, ref_count, hal::BufferUses::empty()); + .insert_single(id, buffer, hal::BufferUses::empty()); return (id.0, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 61b456a2f8..ea0940d857 100644 --- 
a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -526,28 +526,25 @@ impl Device { } pub fn create_buffer_from_hal( - &self, + self: &Arc, hal_buffer: A::Buffer, - self_id: id::DeviceId, + self_id: DeviceId, desc: &resource::BufferDescriptor, ) -> Buffer { debug_assert_eq!(self_id.backend(), A::VARIANT); Buffer { raw: Some(hal_buffer), - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + device: self.clone(), usage: desc.usage, size: desc.size, - initialization_status: BufferInitTracker::new(0), - sync_mapped_writes: None, - map_state: resource::BufferMapState::Idle, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + initialization_status: RwLock::new(BufferInitTracker::new(0)), + sync_mapped_writes: Mutex::new(None), + map_state: Mutex::new(resource::BufferMapState::Idle), + info: ResourceInfo::new(desc.label.borrow_or_default()), } } - + pub(crate) fn create_texture( self: &Arc, self_id: DeviceId, From 71b2af60bdcdc403dee31a9783d0ae62f5623b49 Mon Sep 17 00:00:00 2001 From: Mauro Gentile Date: Sat, 15 Jul 2023 11:43:00 +0200 Subject: [PATCH 063/132] Fix fmt --- wgpu-hal/src/gles/egl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-hal/src/gles/egl.rs b/wgpu-hal/src/gles/egl.rs index edf98bd8f9..44c98b8226 100644 --- a/wgpu-hal/src/gles/egl.rs +++ b/wgpu-hal/src/gles/egl.rs @@ -756,7 +756,7 @@ impl crate::Instance for Instance { #[cfg(target_os = "emscripten")] let egl1_5: Option<&Arc> = Some(&egl); - + let (display, display_owner, wsi_kind) = if let (Some(library), Some(egl)) = (wayland_library, egl1_5) { log::info!("Using Wayland platform"); From a65bb071b75a00f3e40ee0fdfe736a9c4bf7bc05 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 24 Jul 2023 16:30:23 +0200 Subject: [PATCH 064/132] Adding more info on resource tracking --- wgpu-core/src/track/buffer.rs | 4 +++- wgpu-core/src/track/stateless.rs | 2 ++ wgpu-core/src/track/texture.rs | 4 +++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index a137db8d6c..1eb2406068 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -11,7 +11,7 @@ use super::PendingTransition; use crate::{ hal_api::HalApi, id::{BufferId, TypedId, Valid}, - resource::Buffer, + resource::{Buffer, Resource}, storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, @@ -554,6 +554,8 @@ impl BufferTracker { if existing_ref_count <= 3 { self.metadata.remove(index); return true; + } else { + log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).as_info().label(), existing_ref_count); } } } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index b003ea9e17..4916fac57f 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -186,6 +186,8 @@ impl> StatelessTracker { if existing_ref_count <= 3 { self.metadata.remove(index); return true; + } else { + log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).label(), existing_ref_count); } } } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index a2a2111ec0..a7f952d979 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -23,7 +23,7 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ hal_api::HalApi, id::{TextureId, TypedId, Valid}, - resource::Texture, + 
resource::{Texture, Resource}, storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, @@ -720,6 +720,8 @@ impl TextureTracker { self.end_set.complex.remove(&index); self.metadata.remove(index); return true; + } else { + log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).as_info().label(), existing_ref_count); } } } From 633035f1708601483714f51868ac10ec5c7e9f18 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 24 Jul 2023 16:36:33 +0200 Subject: [PATCH 065/132] Fix fmt --- wgpu-core/src/track/buffer.rs | 9 ++++++++- wgpu-core/src/track/stateless.rs | 6 +++++- wgpu-core/src/track/texture.rs | 11 +++++++++-- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 1eb2406068..96e3cbb768 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -555,7 +555,14 @@ impl BufferTracker { self.metadata.remove(index); return true; } else { - log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).as_info().label(), existing_ref_count); + log::info!( + "{:?} is still referenced from {}", + self.metadata + .get_resource_unchecked(index) + .as_info() + .label(), + existing_ref_count + ); } } } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 4916fac57f..61f23faf9f 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -187,7 +187,11 @@ impl> StatelessTracker { self.metadata.remove(index); return true; } else { - log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).label(), existing_ref_count); + log::info!( + "{:?} is still referenced from {}", + self.metadata.get_resource_unchecked(index).label(), + existing_ref_count + ); } } } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index a7f952d979..ebff03c9e0 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -23,7 +23,7 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ hal_api::HalApi, id::{TextureId, TypedId, Valid}, - resource::{Texture, Resource}, + resource::{Resource, Texture}, storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, @@ -721,7 +721,14 @@ impl TextureTracker { self.metadata.remove(index); return true; } else { - log::info!("{:?} is still referenced from {}", self.metadata.get_resource_unchecked(index).as_info().label(), existing_ref_count); + log::info!( + "{:?} is still referenced from {}", + self.metadata + .get_resource_unchecked(index) + .as_info() + .label(), + existing_ref_count + ); } } } From 94801a5fd65455f7a57f7788c311a8ce0c708097 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 24 Jul 2023 16:51:23 +0200 Subject: [PATCH 066/132] Aligning to workspace --- Cargo.lock | 36 ++++++++++++++++-------------------- wgpu-core/Cargo.toml | 4 +--- wgpu-hal/Cargo.toml | 8 ++------ 3 files changed, 19 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 77d22f9751..59e4fc2b7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -866,12 +866,6 @@ dependencies = [ "simd-adler32", ] -[[package]] -name = "fixedbitset" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - [[package]] name = "fixedbitset" version = "0.4.2" @@ -1931,7 +1925,7 @@ version = 
"0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "221d488cd70617f1bd599ed8ceb659df2147d9393717954d82a0f5e8032a6ab1" dependencies = [ - "redox_syscall", + "redox_syscall 0.3.5", ] [[package]] @@ -1980,10 +1974,13 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ + "backtrace", "cfg-if", "libc", - "redox_syscall", + "petgraph", + "redox_syscall 0.3.5", "smallvec", + "thread-id", "windows-targets 0.48.1", ] @@ -1999,16 +1996,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset 0.2.0", - "indexmap", -] - [[package]] name = "petgraph" version = "0.6.3" @@ -2242,6 +2229,15 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.3.5" @@ -3487,7 +3483,7 @@ dependencies = [ "log", "naga", "nv-flip", - "parking_lot 0.11.2", + "parking_lot", "png", "pollster", "raw-window-handle 0.5.2", @@ -3824,7 +3820,7 @@ dependencies = [ "orbclient", "percent-encoding", "raw-window-handle 0.5.2", - "redox_syscall", + "redox_syscall 0.3.5", "sctk-adwaita 0.5.4", "smithay-client-toolkit", "wasm-bindgen", diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index dc5352979e..05a9443a4c 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -71,9 +71,7 @@ smallvec = "1" thiserror = "1" [dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "bac2d82a430fbfcf100ee22b7c3bc12f3d593079" -version = "0.13.0" +workspace = true features = ["clone", "span", "validate"] [dependencies.wgt] diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml index 550752eb37..d8e8b9a6dc 100644 --- a/wgpu-hal/Cargo.toml +++ b/wgpu-hal/Cargo.toml @@ -119,16 +119,12 @@ libc = "0.2" android_system_properties = "0.1.1" [dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "bac2d82a430fbfcf100ee22b7c3bc12f3d593079" -version = "0.13.0" +workspace = true features = ["clone"] # DEV dependencies [dev-dependencies.naga] -git = "https://github.com/gfx-rs/naga" -rev = "bac2d82a430fbfcf100ee22b7c3bc12f3d593079" -version = "0.13.0" +workspace = true features = ["wgsl-in"] [dev-dependencies] From 6babd06dafc32bd70db36d107fb517bb7b63f35a Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 27 Jul 2023 19:42:37 +0200 Subject: [PATCH 067/132] Clearing temp_suspected once moved to suspected --- wgpu-core/src/device/resource.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ea0940d857..d6c56a780d 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -289,6 +289,7 @@ impl Device { life_tracker .suspected_resources .extend(&self.temp_suspected.lock()); + self.temp_suspected.lock().clear(); life_tracker.triage_suspected( hub, From 
d5943e7354ed4a32cdfb6f9f108ce21ce0586c29 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 31 Jul 2023 09:07:00 +0200 Subject: [PATCH 068/132] Restoring Action::CreateTexture for Player --- deno_webgpu/texture.rs | 2 +- player/src/lib.rs | 4 +- player/tests/data/clear-buffer-texture.ron | 2 +- player/tests/data/quad.ron | 7 +- .../tests/data/zero-init-texture-binding.ron | 16 +++- .../data/zero-init-texture-copytobuffer.ron | 2 +- .../data/zero-init-texture-rendertarget.ron | 7 +- wgpu-core/src/command/clear.rs | 7 +- wgpu-core/src/conv.rs | 30 +++++- wgpu-core/src/device/global.rs | 93 ++++++++----------- wgpu-core/src/device/resource.rs | 37 +------- wgpu-core/src/device/trace.rs | 6 +- wgpu-core/src/present.rs | 41 ++++---- wgpu-core/src/registry.rs | 1 + wgpu-core/src/resource.rs | 28 ++++-- wgpu-hal/src/dx11/mod.rs | 2 +- wgpu-hal/src/dx12/mod.rs | 2 +- wgpu-hal/src/empty.rs | 2 +- wgpu-hal/src/gles/mod.rs | 2 +- wgpu-hal/src/lib.rs | 2 +- wgpu-hal/src/metal/mod.rs | 2 +- wgpu-hal/src/vulkan/mod.rs | 2 +- wgpu/src/backend/direct.rs | 2 +- 23 files changed, 155 insertions(+), 144 deletions(-) diff --git a/deno_webgpu/texture.rs b/deno_webgpu/texture.rs index 5dfdaa848d..92c8457071 100644 --- a/deno_webgpu/texture.rs +++ b/deno_webgpu/texture.rs @@ -83,7 +83,7 @@ pub fn op_webgpu_create_texture( let (val, maybe_err) = gfx_select!(device => instance.device_create_texture( device, &descriptor, - (), Some(()) + () )); let rid = state.resource_table.add(WebGpuTexture { diff --git a/player/src/lib.rs b/player/src/lib.rs index f2395a4b79..910a174abd 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -180,9 +180,9 @@ impl GlobalPlay for wgc::global::Global { Action::DestroyBuffer(id) => { self.buffer_drop::(id, true); } - Action::CreateTexture(id, tv_id, desc) => { + Action::CreateTexture(id, desc) => { self.device_maintain_ids::(device).unwrap(); - let (_, error) = self.device_create_texture::(device, &desc, id, tv_id); + let (_, error) = self.device_create_texture::(device, &desc, id); if let Some(e) = error { panic!("{:?}", e); } diff --git a/player/tests/data/clear-buffer-texture.ron b/player/tests/data/clear-buffer-texture.ron index dad6a86175..c6879e31da 100644 --- a/player/tests/data/clear-buffer-texture.ron +++ b/player/tests/data/clear-buffer-texture.ron @@ -20,7 +20,7 @@ ) ], actions: [ - CreateTexture(Id(0, 1, Empty), None, ( + CreateTexture(Id(0, 1, Empty), ( label: Some("Output Texture"), size: ( width: 64, diff --git a/player/tests/data/quad.ron b/player/tests/data/quad.ron index 0a3962f605..563ba24b84 100644 --- a/player/tests/data/quad.ron +++ b/player/tests/data/quad.ron @@ -17,7 +17,7 @@ ), data: "quad.wgsl", ), - CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( + CreateTexture(Id(0, 1, Empty), ( label: Some("Output Texture"), size: ( width: 64, @@ -30,6 +30,11 @@ usage: 27, view_formats: [], )), + CreateTextureView( + id: Id(0, 1, Empty), + parent_id: Id(0, 1, Empty), + desc: (), + ), CreateBuffer( Id(0, 1, Empty), ( diff --git a/player/tests/data/zero-init-texture-binding.ron b/player/tests/data/zero-init-texture-binding.ron index f526bcf455..17aa3b4279 100644 --- a/player/tests/data/zero-init-texture-binding.ron +++ b/player/tests/data/zero-init-texture-binding.ron @@ -17,7 +17,7 @@ // MISSING: Partial views ], actions: [ - CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( + CreateTexture(Id(0, 1, Empty), ( label: Some("Sampled Texture"), size: ( width: 64, @@ -30,6 +30,11 @@ usage: 5, // SAMPLED + COPY_SRC view_formats: [], )), + CreateTextureView( + 
id: Id(0, 1, Empty), + parent_id: Id(0, 1, Empty), + desc: (), + ), CreateBuffer( Id(0, 1, Empty), ( @@ -39,7 +44,7 @@ mapped_at_creation: false, ), ), - CreateTexture(Id(1, 1, Empty), Some(Id(1, 1, Empty)), ( + CreateTexture(Id(1, 1, Empty), ( label: Some("Storage Texture"), size: ( width: 64, @@ -51,7 +56,12 @@ format: "rgba8unorm", usage: 9, // STORAGE + COPY_SRC view_formats: [], - )), + )), + CreateTextureView( + id: Id(1, 1, Empty), + parent_id: Id(1, 1, Empty), + desc: (), + ), CreateBuffer( Id(1, 1, Empty), ( diff --git a/player/tests/data/zero-init-texture-copytobuffer.ron b/player/tests/data/zero-init-texture-copytobuffer.ron index 5ff1cebf90..0bb16ccebb 100644 --- a/player/tests/data/zero-init-texture-copytobuffer.ron +++ b/player/tests/data/zero-init-texture-copytobuffer.ron @@ -10,7 +10,7 @@ // MISSING: Partial copies ], actions: [ - CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( + CreateTexture(Id(0, 1, Empty), ( label: Some("Copy To Buffer Texture"), size: ( width: 64, diff --git a/player/tests/data/zero-init-texture-rendertarget.ron b/player/tests/data/zero-init-texture-rendertarget.ron index 5f90c72a6b..831af942a2 100644 --- a/player/tests/data/zero-init-texture-rendertarget.ron +++ b/player/tests/data/zero-init-texture-rendertarget.ron @@ -10,7 +10,7 @@ // MISSING: Partial view. ], actions: [ - CreateTexture(Id(0, 1, Empty), Some(Id(0, 1, Empty)), ( + CreateTexture(Id(0, 1, Empty), ( label: Some("Render Target Texture"), size: ( width: 64, @@ -23,6 +23,11 @@ usage: 17, // RENDER_ATTACHMENT + COPY_SRC view_formats: [], )), + CreateTextureView( + id: Id(0, 1, Empty), + parent_id: Id(0, 1, Empty), + desc: (), + ), CreateBuffer( Id(0, 1, Empty), ( diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 76016e46c9..25aa12210a 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -261,7 +261,9 @@ pub(crate) fn clear_texture( TextureClearMode::RenderPass { is_color: false, .. } => hal::TextureUses::DEPTH_STENCIL_WRITE, - TextureClearMode::RenderPass { is_color: true, .. } => hal::TextureUses::COLOR_TARGET, + TextureClearMode::Surface { .. } | TextureClearMode::RenderPass { is_color: true, .. } => { + hal::TextureUses::COLOR_TARGET + } TextureClearMode::None => { return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0)); } @@ -303,6 +305,9 @@ pub(crate) fn clear_texture( encoder, dst_raw, ), + TextureClearMode::Surface { .. } => { + clear_texture_via_render_passes(dst_texture, range, true, encoder)? + } TextureClearMode::RenderPass { is_color, .. } => { clear_texture_via_render_passes(dst_texture, range, is_color, encoder)? } diff --git a/wgpu-core/src/conv.rs b/wgpu-core/src/conv.rs index 75a97eb087..c8edbc98f4 100644 --- a/wgpu-core/src/conv.rs +++ b/wgpu-core/src/conv.rs @@ -1,4 +1,6 @@ -use crate::resource; +use wgt::TextureFormatFeatures; + +use crate::resource::{self, TextureDescriptor}; pub fn is_power_of_two_u16(val: u16) -> bool { val != 0 && (val & (val - 1)) == 0 @@ -131,6 +133,32 @@ pub fn map_texture_usage( u } +pub fn map_texture_usage_for_texture( + desc: &TextureDescriptor, + format_features: &TextureFormatFeatures, +) -> hal::TextureUses { + // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we + // wouldn't be able to initialize the texture. 
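// Illustrative sketch (not part of the patch): it restates, with plain
// booleans, the usage-selection rule that `map_texture_usage_for_texture`
// implements below, to make the precedence explicit. Depth/stencil formats
// always add DEPTH_STENCIL_WRITE; a texture that already has COPY_DST keeps
// it; renderable 2D color formats prefer COLOR_TARGET so zero-initialization
// can run as a render pass; everything else falls back to COPY_DST. The enum
// and function names are simplified stand-ins.
#[derive(Clone, Copy, Debug, PartialEq)]
enum InitUse {
    DepthStencilWrite,
    CopyDst,
    ColorTarget,
}

fn init_use(
    is_depth_stencil: bool,
    has_copy_dst: bool,
    format_is_renderable: bool,
    is_2d: bool,
) -> InitUse {
    if is_depth_stencil {
        InitUse::DepthStencilWrite
    } else if has_copy_dst {
        InitUse::CopyDst
    } else if format_is_renderable && is_2d {
        InitUse::ColorTarget
    } else {
        InitUse::CopyDst
    }
}

#[test]
fn depth_stencil_takes_precedence() {
    assert_eq!(init_use(true, true, true, true), InitUse::DepthStencilWrite);
}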
+ map_texture_usage(desc.usage, desc.format.into()) + | if desc.format.is_depth_stencil_format() { + hal::TextureUses::DEPTH_STENCIL_WRITE + } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + hal::TextureUses::COPY_DST // (set already) + } else { + // Use COPY_DST only if we can't use COLOR_TARGET + if format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + && desc.dimension == wgt::TextureDimension::D2 + // Render targets dimension must be 2d + { + hal::TextureUses::COLOR_TARGET + } else { + hal::TextureUses::COPY_DST + } + } +} + pub fn map_texture_usage_from_hal(uses: hal::TextureUses) -> wgt::TextureUsages { let mut u = wgt::TextureUsages::empty(); u.set( diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 5720dad6f0..ac4ee4ddae 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -11,7 +11,7 @@ use crate::{ instance::{self, Adapter, Surface}, pipeline, present, resource::{self, Buffer, BufferAccessResult, BufferMapState, Resource}, - resource::{BufferAccessError, BufferMapOperation, TextureClearMode}, + resource::{BufferAccessError, BufferMapOperation}, validation::check_buffer_usage, FastHashMap, Label, LabelHelpers as _, }; @@ -555,16 +555,12 @@ impl Global { device_id: DeviceId, desc: &resource::TextureDescriptor, id_in: Input, - idtv_in: Option>, ) -> (id::TextureId, Option) { profiling::scope!("Device::create_texture"); let hub = A::hub(self); let fid = hub.textures.prepare(id_in); - let mut fid_tv = idtv_in - .as_ref() - .map(|id| hub.texture_views.prepare(id.clone())); let error = loop { let device = match hub.devices.get(device_id) { @@ -573,11 +569,7 @@ impl Global { }; #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { - trace.add(trace::Action::CreateTexture( - fid.id(), - fid_tv.as_ref().map(|id| id.id()), - desc.clone(), - )); + trace.add(trace::Action::CreateTexture(fid.id(), desc.clone())); } let texture = match device.create_texture(device_id, &device.adapter, desc) { @@ -587,54 +579,44 @@ impl Global { let (id, resource) = fid.assign(texture); log::info!("Created Texture {:?} with {:?}", id, desc); - if let TextureClearMode::RenderPass { - ref mut clear_views, - is_color: _, - } = *resource.clear_mode.write() + let format_features = device + .describe_format_features(&device.adapter, desc.format) + .unwrap(); + let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features); + if hal_usage + .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) { - if idtv_in.is_some() { - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; + let is_color = !desc.format.is_depth_stencil_format(); + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let descriptor = resource::TextureViewDescriptor { - label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), - format: Some(desc.format), - dimension: Some(dimension), - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - 
array_layer_count: Some(1), - }, - }; - - let texture_view = device - .create_texture_view(&resource, id.0, &descriptor) - .unwrap(); - let fid_tv = if fid_tv.is_some() { - fid_tv.take().unwrap() - } else { - hub.texture_views.prepare(idtv_in.clone().unwrap()) - }; - let (tv_id, texture_view) = fid_tv.assign(texture_view); - log::info!("Created TextureView {:?} for texture {:?}", tv_id, id); - - clear_views.push(texture_view.clone()); - - device - .trackers - .lock() - .views - .insert_single(tv_id, texture_view); - } + let mut clear_views = SmallVec::new(); + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let desc = resource::TextureViewDescriptor { + label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), + format: Some(desc.format), + dimension: Some(dimension), + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + let view = device.create_texture_view(&resource, id.0, &desc).unwrap(); + clear_views.push(Arc::new(view)); } } + let mut clear_mode = resource.clear_mode.write(); + *clear_mode = resource::TextureClearMode::RenderPass { + clear_views, + is_color, + }; } device.trackers.lock().textures.insert_single( @@ -678,7 +660,7 @@ impl Global { // recorded in the replay #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { - trace.add(trace::Action::CreateTexture(fid.id(), None, desc.clone())); + trace.add(trace::Action::CreateTexture(fid.id(), desc.clone())); } let format_features = match device @@ -802,6 +784,7 @@ impl Global { ) { resource::TextureClearMode::BufferCopy => SmallVec::new(), resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views, + resource::TextureClearMode::Surface { .. } => SmallVec::new(), resource::TextureClearMode::None => SmallVec::new(), }; diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index d6c56a780d..915caae5ed 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -32,7 +32,7 @@ use crate::{ use arrayvec::ArrayVec; use hal::{CommandEncoder as _, Device as _}; use parking_lot::{Mutex, MutexGuard, RwLock}; -use smallvec::SmallVec; + use thiserror::Error; use wgt::{TextureFormat, TextureSampleType, TextureViewDimension}; @@ -700,26 +700,7 @@ impl Device { self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; } - // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we - // wouldn't be able to initialize the texture. - let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) - | if desc.format.is_depth_stencil_format() { - hal::TextureUses::DEPTH_STENCIL_WRITE - } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { - hal::TextureUses::COPY_DST // (set already) - } else { - // Use COPY_DST only if we can't use COLOR_TARGET - if format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - && desc.dimension == wgt::TextureDimension::D2 - // Render targets dimension must be 2d - { - hal::TextureUses::COLOR_TARGET - } else { - hal::TextureUses::COPY_DST - } - }; + let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features); let hal_desc = hal::TextureDescriptor { label: desc.label.borrow_option(), @@ -741,19 +722,7 @@ impl Device { .map_err(DeviceError::from)? 
}; - let clear_mode = if hal_usage - .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) - { - let is_color = !desc.format.is_depth_stencil_format(); - let clear_views = SmallVec::new(); - resource::TextureClearMode::RenderPass { - clear_views, - is_color, - } - } else { - resource::TextureClearMode::BufferCopy - }; - + let clear_mode = resource::TextureClearMode::BufferCopy; let mut texture = self.create_texture_from_hal( raw_texture, hal_usage, diff --git a/wgpu-core/src/device/trace.rs b/wgpu-core/src/device/trace.rs index a7c92f302b..57f82c181e 100644 --- a/wgpu-core/src/device/trace.rs +++ b/wgpu-core/src/device/trace.rs @@ -47,11 +47,7 @@ pub enum Action<'a> { CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>), FreeBuffer(id::BufferId), DestroyBuffer(id::BufferId), - CreateTexture( - id::TextureId, - Option, - crate::resource::TextureDescriptor<'a>, - ), + CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>), FreeTexture(id::TextureId), DestroyTexture(id::TextureId), CreateTextureView { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 008b23d38b..1d367a8f03 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -10,7 +10,7 @@ extract it from the hub. !*/ use std::{ - borrow::{Borrow, Cow}, + borrow::Borrow, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -176,25 +176,21 @@ impl Global { flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, }; - let mut clear_views = smallvec::SmallVec::new(); - - let descriptor = resource::TextureViewDescriptor { - label: Some(Cow::Borrowed("(wgpu internal) clear surface texture view")), - format: Some(config.format), - dimension: Some(wgt::TextureViewDimension::D2), + let clear_view_desc = hal::TextureViewDescriptor { + label: Some("(wgpu internal) clear surface texture view"), + format: config.format, + dimension: wgt::TextureViewDimension::D2, + usage: hal::TextureUses::COLOR_TARGET, range: wgt::ImageSubresourceRange::default(), }; - let view = device - .create_texture_inner_view( + let clear_view = unsafe { + hal::Device::create_texture_view( + device.raw(), ast.texture.borrow(), - fid.id(), - &texture_desc, - &hal::TextureUses::COLOR_TARGET, - &format_features, - &descriptor, + &clear_view_desc, ) - .unwrap(); - clear_views.push(Arc::new(view)); + } + .map_err(DeviceError::from)?; let mut presentation = surface.presentation.lock(); let present = presentation.as_mut().unwrap(); @@ -214,9 +210,8 @@ impl Global { mips: 0..1, }, info: ResourceInfo::new(""), - clear_mode: RwLock::new(resource::TextureClearMode::RenderPass { - clear_views, - is_color: true, + clear_mode: RwLock::new(resource::TextureClearMode::Surface { + clear_view: Some(clear_view), }), }; @@ -310,12 +305,12 @@ impl Global { if let Ok(mut texture) = Arc::try_unwrap(texture) { let mut clear_mode = texture.clear_mode.write(); let clear_mode = &mut *clear_mode; - if let resource::TextureClearMode::RenderPass { - ref mut clear_views, - .. + if let resource::TextureClearMode::Surface { + ref mut clear_view, .. 
} = *clear_mode { - clear_views.clear(); + let view = clear_view.take().unwrap(); + drop(view); } let suf = A::get_surface(&surface); diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 869975f9cc..7d56c4aca0 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -49,6 +49,7 @@ pub(crate) struct FutureId<'a, I: id::TypedId, T: Resource> { } impl> FutureId<'_, I, T> { + #[allow(dead_code)] pub fn id(&self) -> I { self.id } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index a2b2f33438..28c2c38ba3 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -537,6 +537,9 @@ pub enum TextureClearMode { clear_views: SmallVec<[Arc>; 1]>, is_color: bool, }, + Surface { + clear_view: Option, + }, // Texture can't be cleared, attempting to do so will cause panic. // (either because it is impossible for the type of texture or it is being destroyed) None, @@ -561,13 +564,21 @@ impl Drop for Texture { use hal::Device; let mut clear_mode = self.clear_mode.write(); let clear_mode = &mut *clear_mode; - if let TextureClearMode::RenderPass { - ref mut clear_views, - .. - } = *clear_mode - { - clear_views.clear(); - } + match *clear_mode { + TextureClearMode::Surface { + ref mut clear_view, .. + } => { + let view = clear_view.take(); + drop(view); + } + TextureClearMode::RenderPass { + ref mut clear_views, + .. + } => { + clear_views.clear(); + } + _ => {} + }; if self.inner.is_none() { return; } @@ -594,6 +605,7 @@ impl Texture { TextureClearMode::None => { panic!("Given texture can't be cleared") } + TextureClearMode::Surface { ref clear_view, .. } => clear_view.as_ref().unwrap(), TextureClearMode::RenderPass { ref clear_views, .. } => { @@ -724,6 +736,8 @@ pub enum TextureDimensionError { pub enum CreateTextureError { #[error(transparent)] Device(#[from] DeviceError), + #[error(transparent)] + CreateTextureView(#[from] CreateTextureViewError), #[error("Invalid usage flags {0:?}")] InvalidUsage(wgt::TextureUsages), #[error(transparent)] diff --git a/wgpu-hal/src/dx11/mod.rs b/wgpu-hal/src/dx11/mod.rs index 25eb62aab0..5d920d50d5 100644 --- a/wgpu-hal/src/dx11/mod.rs +++ b/wgpu-hal/src/dx11/mod.rs @@ -9,7 +9,7 @@ mod device; mod instance; mod library; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; impl crate::Api for Api { diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index b86d0d9d8f..d0638a749e 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -55,7 +55,7 @@ use winapi::{ Interface as _, }; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; impl crate::Api for Api { diff --git a/wgpu-hal/src/empty.rs b/wgpu-hal/src/empty.rs index 7b58e5a9d4..beca5e80c0 100644 --- a/wgpu-hal/src/empty.rs +++ b/wgpu-hal/src/empty.rs @@ -2,7 +2,7 @@ use std::ops::Range; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; pub struct Context; #[derive(Debug)] diff --git a/wgpu-hal/src/gles/mod.rs b/wgpu-hal/src/gles/mod.rs index 03397a1010..a84f97d662 100644 --- a/wgpu-hal/src/gles/mod.rs +++ b/wgpu-hal/src/gles/mod.rs @@ -91,7 +91,7 @@ use parking_lot::Mutex; use std::sync::atomic::{AtomicU32, AtomicU8}; use std::{fmt, ops::Range, sync::Arc}; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; //Note: we can support more samplers if not every one of them is used at a time, diff --git a/wgpu-hal/src/lib.rs b/wgpu-hal/src/lib.rs index 6c3fd2a200..12709707cb 100644 --- a/wgpu-hal/src/lib.rs +++ b/wgpu-hal/src/lib.rs @@ -154,7 +154,7 @@ pub enum SurfaceError { #[error("Not 
supported")] pub struct InstanceError; -pub trait Api: Clone + Sized { +pub trait Api: Clone + Sized + std::fmt::Debug { type Instance: Instance; type Surface: Surface; type Adapter: Adapter; diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs index 8fb2df3f48..c4d9827b8e 100644 --- a/wgpu-hal/src/metal/mod.rs +++ b/wgpu-hal/src/metal/mod.rs @@ -36,7 +36,7 @@ use arrayvec::ArrayVec; use metal::foreign_types::ForeignTypeRef as _; use parking_lot::{Mutex, RwLock}; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; type ResourceIndex = u32; diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs index 21d1824a81..88473e88f0 100644 --- a/wgpu-hal/src/vulkan/mod.rs +++ b/wgpu-hal/src/vulkan/mod.rs @@ -52,7 +52,7 @@ use parking_lot::{Mutex, RwLock}; const MILLIS_TO_NANOS: u64 = 1_000_000; const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Api; impl crate::Api for Api { diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 421df31f31..bc859659fa 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -1310,7 +1310,7 @@ impl crate::Context for Context { let (id, error) = wgc::gfx_select!(device => global.device_create_texture( *device, &wgt_desc, - (), Some(()) + () )); if let Some(cause) = error { self.handle_error( From b763431bec28c8dcdbe4fc2e9f7056e8b7470a4c Mon Sep 17 00:00:00 2001 From: gents83 Date: Tue, 1 Aug 2023 21:43:36 +0200 Subject: [PATCH 069/132] Fix memleaks due to duplicated items + drain usage --- wgpu-core/src/device/global.rs | 35 +- wgpu-core/src/device/life.rs | 631 +++++++++++++++++++++---------- wgpu-core/src/device/queue.rs | 49 ++- wgpu-core/src/device/resource.rs | 32 +- 4 files changed, 497 insertions(+), 250 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index ac4ee4ddae..eb5688afe4 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -538,7 +538,7 @@ impl Global { .lock_life() .suspected_resources .buffers - .push(buffer.clone()); + .insert(buffer_id, buffer.clone()); } } @@ -847,7 +847,10 @@ impl Global { { life_lock.future_suspected_textures.push(texture.clone()); } else { - life_lock.suspected_resources.textures.push(texture.clone()); + life_lock + .suspected_resources + .textures + .insert(texture_id, texture.clone()); } } @@ -933,7 +936,7 @@ impl Global { .lock_life() .suspected_resources .texture_views - .push(view.clone()); + .insert(texture_view_id, view.clone()); if wait { match view.device.wait_for_submit(last_submit_index) { @@ -1012,7 +1015,7 @@ impl Global { .lock_life() .suspected_resources .samplers - .push(sampler.clone()); + .insert(sampler_id, sampler.clone()); } pub fn device_create_bind_group_layout( @@ -1111,7 +1114,7 @@ impl Global { .lock_life() .suspected_resources .bind_group_layouts - .push(layout.clone()); + .insert(bind_group_layout_id, layout.clone()); } pub fn device_create_pipeline_layout( @@ -1185,7 +1188,7 @@ impl Global { .lock_life() .suspected_resources .pipeline_layouts - .push(layout.clone()); + .insert(pipeline_layout_id, layout.clone()); } pub fn device_create_bind_group( @@ -1259,7 +1262,7 @@ impl Global { .lock_life() .suspected_resources .bind_groups - .push(bind_group.clone()); + .insert(bind_group_id, bind_group.clone()); } pub fn device_create_shader_module( @@ -1542,7 +1545,7 @@ impl Global { .lock_life() .suspected_resources .render_bundles - .push(bundle.clone()); + .insert(render_bundle_id, 
bundle.clone()); } pub fn device_create_query_set( @@ -1612,7 +1615,7 @@ impl Global { .lock_life() .suspected_resources .query_sets - .push(query_set.clone()); + .insert(query_set_id, query_set.clone()); } pub fn query_set_label(&self, id: id::QuerySetId) -> String { @@ -1737,9 +1740,12 @@ impl Global { life_lock .suspected_resources .render_pipelines - .push(pipeline.clone()); + .insert(render_pipeline_id, pipeline.clone()); let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); - life_lock.suspected_resources.pipeline_layouts.push(layout); + life_lock + .suspected_resources + .pipeline_layouts + .insert(layout_id.0, layout); } pub fn device_create_compute_pipeline( @@ -1860,9 +1866,12 @@ impl Global { life_lock .suspected_resources .compute_pipelines - .push(pipeline.clone()); + .insert(compute_pipeline_id, pipeline.clone()); let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); - life_lock.suspected_resources.pipeline_layouts.push(layout); + life_lock + .suspected_resources + .pipeline_layouts + .insert(layout_id.0, layout); } pub fn surface_configure( diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 085fb27ec4..4ae9209af9 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -9,11 +9,11 @@ use crate::{ }, hal_api::HalApi, hub::Hub, - id, + id::{self}, identity::GlobalIdentityHandlerFactory, pipeline::{ComputePipeline, RenderPipeline}, resource::{self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView}, - track::{BindGroupStates, RenderBundleScope, Tracker}, + track::Tracker, SubmissionIndex, }; use smallvec::SmallVec; @@ -21,37 +21,37 @@ use smallvec::SmallVec; use parking_lot::Mutex; use thiserror::Error; -use std::{mem, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; /// A struct that keeps lists of resources that are no longer needed by the user. 
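// Illustrative sketch (not part of the patch): the change below replaces the
// Vec-based suspect lists with HashMaps keyed by resource id, so suspecting
// the same resource several times stores a single entry (and a single extra
// Arc) instead of accumulating duplicates. `BufferId` and `Buffer` are
// simplified stand-ins.
use std::collections::HashMap;
use std::sync::Arc;

type BufferId = u64;
struct Buffer;

#[derive(Default)]
struct SuspectedResources {
    buffers: HashMap<BufferId, Arc<Buffer>>,
}

impl SuspectedResources {
    fn suspect_buffer(&mut self, id: BufferId, buffer: &Arc<Buffer>) {
        // Re-inserting the same id overwrites the previous entry, so the
        // strong count is not inflated by repeated suspicion.
        self.buffers.insert(id, Arc::clone(buffer));
    }
}

fn suspect_twice() {
    let buf = Arc::new(Buffer);
    let mut suspected = SuspectedResources::default();
    suspected.suspect_buffer(7, &buf);
    suspected.suspect_buffer(7, &buf);
    // One map entry plus the original handle: the count stays at 2.
    assert_eq!(Arc::strong_count(&buf), 2);
}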
pub(crate) struct SuspectedResources { - pub(crate) buffers: Vec>>, - pub(crate) textures: Vec>>, - pub(crate) texture_views: Vec>>, - pub(crate) samplers: Vec>>, - pub(crate) bind_groups: Vec>>, - pub(crate) compute_pipelines: Vec>>, - pub(crate) render_pipelines: Vec>>, - pub(crate) bind_group_layouts: Vec>>, - pub(crate) pipeline_layouts: Vec>>, - pub(crate) render_bundles: Vec>>, - pub(crate) query_sets: Vec>>, + pub(crate) buffers: HashMap>>, + pub(crate) textures: HashMap>>, + pub(crate) texture_views: HashMap>>, + pub(crate) samplers: HashMap>>, + pub(crate) bind_groups: HashMap>>, + pub(crate) compute_pipelines: HashMap>>, + pub(crate) render_pipelines: HashMap>>, + pub(crate) bind_group_layouts: HashMap>>, + pub(crate) pipeline_layouts: HashMap>>, + pub(crate) render_bundles: HashMap>>, + pub(crate) query_sets: HashMap>>, } impl SuspectedResources { pub(crate) fn new() -> Self { Self { - buffers: Vec::new(), - textures: Vec::new(), - texture_views: Vec::new(), - samplers: Vec::new(), - bind_groups: Vec::new(), - compute_pipelines: Vec::new(), - render_pipelines: Vec::new(), - bind_group_layouts: Vec::new(), - pipeline_layouts: Vec::new(), - render_bundles: Vec::new(), - query_sets: Vec::new(), + buffers: HashMap::new(), + textures: HashMap::new(), + texture_views: HashMap::new(), + samplers: HashMap::new(), + bind_groups: HashMap::new(), + compute_pipelines: HashMap::new(), + render_pipelines: HashMap::new(), + bind_group_layouts: HashMap::new(), + pipeline_layouts: HashMap::new(), + render_bundles: HashMap::new(), + query_sets: HashMap::new(), } } pub(crate) fn clear(&mut self) { @@ -69,43 +69,39 @@ impl SuspectedResources { } pub(crate) fn extend(&mut self, other: &Self) { - self.buffers.extend_from_slice(&other.buffers); - self.textures.extend_from_slice(&other.textures); - self.texture_views.extend_from_slice(&other.texture_views); - self.samplers.extend_from_slice(&other.samplers); - self.bind_groups.extend_from_slice(&other.bind_groups); - self.compute_pipelines - .extend_from_slice(&other.compute_pipelines); - self.render_pipelines - .extend_from_slice(&other.render_pipelines); - self.bind_group_layouts - .extend_from_slice(&other.bind_group_layouts); - self.pipeline_layouts - .extend_from_slice(&other.pipeline_layouts); - self.render_bundles.extend_from_slice(&other.render_bundles); - self.query_sets.extend_from_slice(&other.query_sets); - } - - pub(crate) fn add_render_bundle_scope(&mut self, trackers: &RenderBundleScope) { - self.buffers - .extend(trackers.buffers.used_resources().cloned()); - self.textures - .extend(trackers.textures.used_resources().cloned()); - self.bind_groups - .extend(trackers.bind_groups.used_resources().cloned()); - self.render_pipelines - .extend(trackers.render_pipelines.used_resources().cloned()); - self.query_sets - .extend(trackers.query_sets.used_resources().cloned()); - } - - pub(crate) fn add_bind_group_states(&mut self, trackers: &BindGroupStates) { - self.buffers - .extend(trackers.buffers.used_resources().cloned()); - self.textures - .extend(trackers.textures.used_resources().cloned()); - self.texture_views.extend(trackers.views.used_resources()); - self.samplers.extend(trackers.samplers.used_resources()); + other.buffers.iter().for_each(|(id, v)| { + self.buffers.insert(*id, v.clone()); + }); + other.textures.iter().for_each(|(id, v)| { + self.textures.insert(*id, v.clone()); + }); + other.texture_views.iter().for_each(|(id, v)| { + self.texture_views.insert(*id, v.clone()); + }); + other.samplers.iter().for_each(|(id, v)| { + 
self.samplers.insert(*id, v.clone()); + }); + other.bind_groups.iter().for_each(|(id, v)| { + self.bind_groups.insert(*id, v.clone()); + }); + other.compute_pipelines.iter().for_each(|(id, v)| { + self.compute_pipelines.insert(*id, v.clone()); + }); + other.render_pipelines.iter().for_each(|(id, v)| { + self.render_pipelines.insert(*id, v.clone()); + }); + other.bind_group_layouts.iter().for_each(|(id, v)| { + self.bind_group_layouts.insert(*id, v.clone()); + }); + other.pipeline_layouts.iter().for_each(|(id, v)| { + self.pipeline_layouts.insert(*id, v.clone()); + }); + other.render_bundles.iter().for_each(|(id, v)| { + self.render_bundles.insert(*id, v.clone()); + }); + other.query_sets.iter().for_each(|(id, v)| { + self.query_sets.insert(*id, v.clone()); + }); } } @@ -354,12 +350,16 @@ impl LifetimeTracker { } pub fn post_submit(&mut self) { - self.suspected_resources - .buffers - .append(&mut self.future_suspected_buffers); - self.suspected_resources - .textures - .append(&mut self.future_suspected_textures); + for v in self.future_suspected_buffers.drain(..).take(1) { + self.suspected_resources + .buffers + .insert(v.as_info().id().0, v); + } + for v in self.future_suspected_textures.drain(..).take(1) { + self.suspected_resources + .textures + .insert(v.as_info().id().0, v); + } } pub(crate) fn map(&mut self, value: &Arc>) { @@ -460,55 +460,19 @@ impl LifetimeTracker { } impl LifetimeTracker { - /// Identify resources to free, according to `trackers` and `self.suspected_resources`. - /// - /// Given `trackers`, the [`Tracker`] belonging to same [`Device`] as - /// `self`, and `hub`, the [`Hub`] to which that `Device` belongs: - /// - /// Remove from `trackers` each resource mentioned in - /// [`self.suspected_resources`]. If `trackers` held the final reference to - /// that resource, add it to the appropriate free list, to be destroyed by - /// the hal: - /// - /// - Add resources used by queue submissions still in flight to the - /// [`last_resources`] table of the last such submission's entry in - /// [`self.active`]. When that submission has finished execution. the - /// [`triage_submissions`] method will move them to - /// [`self.free_resources`]. - /// - /// - Add resources that can be freed right now to [`self.free_resources`] - /// directly. [`LifetimeTracker::cleanup`] will take care of them as - /// part of this poll. - /// - /// ## Entrained resources - /// - /// This function finds resources that are used only by other resources - /// ready to be freed, and adds those to the free lists as well. For - /// example, if there's some texture `T` used only by some texture view - /// `TV`, then if `TV` can be freed, `T` gets added to the free lists too. - /// - /// Since `wgpu-core` resource ownership patterns are acyclic, we can visit - /// each type that can be owned after all types that could possibly own - /// it. This way, we can detect all free-able objects in a single pass, - /// simply by starting with types that are roots of the ownership DAG (like - /// render bundles) and working our way towards leaf types (like buffers). 
- /// - /// [`Device`]: super::Device - /// [`self.suspected_resources`]: LifetimeTracker::suspected_resources - /// [`last_resources`]: ActiveSubmission::last_resources - /// [`self.active`]: LifetimeTracker::active - /// [`triage_submissions`]: LifetimeTracker::triage_submissions - /// [`self.free_resources`]: LifetimeTracker::free_resources - pub(crate) fn triage_suspected( + fn triage_suspected_render_bundles( &mut self, hub: &Hub, trackers: &Mutex>, - #[cfg(feature = "trace")] mut trace: Option<&mut trace::Trace>, - ) { - profiling::scope!("triage_suspected"); - - if !self.suspected_resources.render_bundles.is_empty() { - while let Some(bundle) = self.suspected_resources.render_bundles.pop() { + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::RenderBundleId), + { + self.suspected_resources + .render_bundles + .retain(|bundle_id, bundle| { let id = bundle.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -516,39 +480,90 @@ impl LifetimeTracker { }; if is_removed { log::info!("Bundle {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderBundle(id.0)); - } + f(bundle_id); if let Some(res) = hub.render_bundles.unregister(id.0) { - self.suspected_resources.add_render_bundle_scope(&res.used); + for v in res.used.buffers.used_resources() { + self.suspected_resources + .buffers + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.bind_groups.used_resources() { + self.suspected_resources + .bind_groups + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.render_pipelines.used_resources() { + self.suspected_resources + .render_pipelines + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.query_sets.used_resources() { + self.suspected_resources + .query_sets + .insert(v.as_info().id().0, v.clone()); + } } } - } - } + !is_removed + }); + self + } - if !self.suspected_resources.bind_groups.is_empty() { - while let Some(resource) = self.suspected_resources.bind_groups.pop() { - let id = resource.info.id(); + fn triage_suspected_bind_groups( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::BindGroupId), + { + self.suspected_resources + .bind_groups + .retain(|bind_group_id, bind_group| { + let id = bind_group.info.id(); let is_removed = { let mut trackers = trackers.lock(); trackers.bind_groups.remove_abandoned(id) }; if is_removed { log::info!("BindGroup {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroup(id.0)); - } + f(bind_group_id); if let Some(res) = hub.bind_groups.unregister(id.0) { - self.suspected_resources.add_bind_group_states(&res.used); + for v in res.used.buffers.used_resources() { + self.suspected_resources + .buffers + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.views.used_resources() { + self.suspected_resources + .texture_views + .insert(v.as_info().id().0, v.clone()); + } + for v in res.used.samplers.used_resources() { + self.suspected_resources + .samplers + .insert(v.as_info().id().0, v.clone()); + } + let bind_group_layout = 
hub.bind_group_layouts.get(res.layout_id.0).unwrap(); self.suspected_resources .bind_group_layouts - .push(bind_group_layout); + .insert(res.layout_id.0, bind_group_layout); let submit_index = res.info.submission_index(); self.active @@ -559,29 +574,43 @@ impl LifetimeTracker { .push(res); } } - } - } + !is_removed + }); + self + } - if !self.suspected_resources.texture_views.is_empty() { - let mut list = mem::take(&mut self.suspected_resources.texture_views); - for texture_view in list.drain(..) { - let id = texture_view.info.id(); + fn triage_suspected_texture_views( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::TextureViewId), + { + self.suspected_resources + .texture_views + .retain(|view_id, view| { + let id = view.info.id(); let is_removed = { let mut trackers = trackers.lock(); trackers.views.remove_abandoned(id) }; + println!( + "TextureView {:?} examined with refcount {}", + id, + Arc::strong_count(view) + ); if is_removed { log::info!("TextureView {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTextureView(id.0)); - } + f(view_id); if let Some(res) = hub.texture_views.unregister(id.0) { if let Some(parent_texture) = res.parent.as_ref() { self.suspected_resources .textures - .push(parent_texture.clone()); + .insert(parent_texture.as_info().id().0, parent_texture.clone()); } let submit_index = res.info.submission_index(); self.active @@ -592,12 +621,24 @@ impl LifetimeTracker { .push(res); } } - } - self.suspected_resources.texture_views = list; - } + !is_removed + }); + self + } - if !self.suspected_resources.textures.is_empty() { - for texture in self.suspected_resources.textures.drain(..) { + fn triage_suspected_textures( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::TextureId), + { + self.suspected_resources + .textures + .retain(|texture_id, texture| { let id = texture.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -605,10 +646,7 @@ impl LifetimeTracker { }; if is_removed { log::info!("Texture {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTexture(id.0)); - } + f(texture_id); if let Some(res) = hub.textures.unregister(id.0) { let submit_index = res.info.submission_index(); @@ -629,11 +667,24 @@ impl LifetimeTracker { non_referenced_resources.textures.push(res); } } - } - } + !is_removed + }); + self + } - if !self.suspected_resources.samplers.is_empty() { - for sampler in self.suspected_resources.samplers.drain(..) 
{ + fn triage_suspected_samplers( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::SamplerId), + { + self.suspected_resources + .samplers + .retain(|sampler_id, sampler| { let id = sampler.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -641,10 +692,7 @@ impl LifetimeTracker { }; if is_removed { log::info!("Sampler {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroySampler(id.0)); - } + f(sampler_id); if let Some(res) = hub.samplers.unregister(id.0) { let submit_index = res.info.submission_index(); @@ -656,11 +704,24 @@ impl LifetimeTracker { .push(res); } } - } - } + !is_removed + }); + self + } - if !self.suspected_resources.buffers.is_empty() { - for buffer in self.suspected_resources.buffers.drain(..) { + fn triage_suspected_buffers( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::BufferId), + { + self.suspected_resources + .buffers + .retain(|buffer_id, buffer| { let id = buffer.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -668,10 +729,7 @@ impl LifetimeTracker { }; if is_removed { log::info!("Buffer {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBuffer(id.0)); - } + f(buffer_id); if let Some(res) = hub.buffers.unregister(id.0) { let submit_index = res.info.submission_index(); @@ -689,11 +747,23 @@ impl LifetimeTracker { .push(res); } } - } - } + !is_removed + }); + self + } - if !self.suspected_resources.compute_pipelines.is_empty() { - for compute_pipeline in self.suspected_resources.compute_pipelines.drain(..) { + fn triage_suspected_compute_pipelines( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::ComputePipelineId), + { + self.suspected_resources.compute_pipelines.retain( + |compute_pipeline_id, compute_pipeline| { let id = compute_pipeline.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -701,10 +771,7 @@ impl LifetimeTracker { }; if is_removed { log::info!("ComputePipeline {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyComputePipeline(id.0)); - } + f(compute_pipeline_id); if let Some(res) = hub.compute_pipelines.unregister(id.0) { let submit_index = res.info.submission_index(); @@ -716,11 +783,25 @@ impl LifetimeTracker { .push(res); } } - } - } + !is_removed + }, + ); + self + } - if !self.suspected_resources.render_pipelines.is_empty() { - for render_pipeline in self.suspected_resources.render_pipelines.drain(..) 
{ + fn triage_suspected_render_pipelines( + &mut self, + hub: &Hub, + trackers: &Mutex>, + mut f: F, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::RenderPipelineId), + { + self.suspected_resources + .render_pipelines + .retain(|render_pipeline_id, render_pipeline| { let id = render_pipeline.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -728,10 +809,7 @@ impl LifetimeTracker { }; if is_removed { log::info!("RenderPipeline {:?} is removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderPipeline(id.0)); - } + f(render_pipeline_id); if let Some(res) = hub.render_pipelines.unregister(id.0) { let submit_index = res.info.submission_index(); @@ -743,21 +821,25 @@ impl LifetimeTracker { .push(res); } } - } - } - - if !self.suspected_resources.pipeline_layouts.is_empty() { - let mut pipeline_layouts_locked = hub.pipeline_layouts.write(); + !is_removed + }); + self + } - for pipeline_layout in self.suspected_resources.pipeline_layouts.drain(..) { + fn triage_suspected_pipeline_layouts(&mut self, hub: &Hub, mut f: F) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::PipelineLayoutId), + { + let mut pipeline_layouts_locked = hub.pipeline_layouts.write(); + self.suspected_resources + .pipeline_layouts + .retain(|pipeline_layout_id, pipeline_layout| { let id = pipeline_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed if pipeline_layouts_locked.is_unique(id.0).unwrap() { log::debug!("PipelineLayout {:?} will be removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyPipelineLayout(id.0)); - } + f(pipeline_layout_id); if let Some(lay) = hub .pipeline_layouts @@ -765,41 +847,61 @@ impl LifetimeTracker { { for bgl_id in &lay.bind_group_layout_ids { let bgl = hub.bind_group_layouts.get(bgl_id.0).unwrap(); - self.suspected_resources.bind_group_layouts.push(bgl); + self.suspected_resources + .bind_group_layouts + .insert(bgl_id.0, bgl); } self.free_resources.pipeline_layouts.push(lay); } + return false; } - } - } - - if !self.suspected_resources.bind_group_layouts.is_empty() { - let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); + true + }); + self + } - for bgl in self.suspected_resources.bind_group_layouts.drain(..) { - let id = bgl.as_info().id(); + fn triage_suspected_bind_group_layouts(&mut self, hub: &Hub, mut f: F) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + F: FnMut(&id::BindGroupLayoutId), + { + let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); + self.suspected_resources.bind_group_layouts.retain( + |bind_group_layout_id, bind_group_layout| { + let id = bind_group_layout.as_info().id(); //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. 
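// A standalone sketch of the check the notes above rely on (illustrative
// names, not the exact wgpu-core helpers): while the registry is locked
// exclusively, nothing else can clone an entry's `Arc`, so a strong count of
// 1 means the registry holds the last reference and the entry can be freed.
//
use std::sync::{Arc, RwLock};

fn drain_unique<T>(registry: &RwLock<Vec<Arc<T>>>) -> Vec<Arc<T>> {
    let mut guard = registry.write().unwrap(); // exclusive: counts cannot change under us
    let mut freed = Vec::new();
    guard.retain(|entry| {
        if Arc::strong_count(entry) == 1 {
            freed.push(entry.clone()); // retain drops the registry copy right after
            false
        } else {
            true
        }
    });
    freed
}

fn main() {
    let registry = RwLock::new(vec![Arc::new(0u32), Arc::new(1u32)]);
    let outside_ref = registry.read().unwrap()[1].clone();
    assert_eq!(drain_unique(&registry).len(), 1); // only the entry with no outside reference is freed
    drop(outside_ref);
}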
if bind_group_layouts_locked.is_unique(id.0).unwrap() { log::debug!("BindGroupLayout {:?} will be removed from registry", id); - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroupLayout(id.0)); - } + f(bind_group_layout_id); + if let Some(lay) = hub .bind_group_layouts .unregister_locked(id.0, &mut *bind_group_layouts_locked) { self.free_resources.bind_group_layouts.push(lay); } + return false; } - } - } + true + }, + ); + self + } - if !self.suspected_resources.query_sets.is_empty() { - for query_set in self.suspected_resources.query_sets.drain(..) { + fn triage_suspected_query_sets( + &mut self, + hub: &Hub, + trackers: &Mutex>, + ) -> &mut Self + where + G: GlobalIdentityHandlerFactory, + { + self.suspected_resources + .query_sets + .retain(|_query_set_id, query_set| { let id = query_set.info.id(); let is_removed = { let mut trackers = trackers.lock(); @@ -819,8 +921,119 @@ impl LifetimeTracker { .push(res); } } + !is_removed + }); + self + } + + /// Identify resources to free, according to `trackers` and `self.suspected_resources`. + /// + /// Given `trackers`, the [`Tracker`] belonging to same [`Device`] as + /// `self`, and `hub`, the [`Hub`] to which that `Device` belongs: + /// + /// Remove from `trackers` each resource mentioned in + /// [`self.suspected_resources`]. If `trackers` held the final reference to + /// that resource, add it to the appropriate free list, to be destroyed by + /// the hal: + /// + /// - Add resources used by queue submissions still in flight to the + /// [`last_resources`] table of the last such submission's entry in + /// [`self.active`]. When that submission has finished execution. the + /// [`triage_submissions`] method will move them to + /// [`self.free_resources`]. + /// + /// - Add resources that can be freed right now to [`self.free_resources`] + /// directly. [`LifetimeTracker::cleanup`] will take care of them as + /// part of this poll. + /// + /// ## Entrained resources + /// + /// This function finds resources that are used only by other resources + /// ready to be freed, and adds those to the free lists as well. For + /// example, if there's some texture `T` used only by some texture view + /// `TV`, then if `TV` can be freed, `T` gets added to the free lists too. + /// + /// Since `wgpu-core` resource ownership patterns are acyclic, we can visit + /// each type that can be owned after all types that could possibly own + /// it. This way, we can detect all free-able objects in a single pass, + /// simply by starting with types that are roots of the ownership DAG (like + /// render bundles) and working our way towards leaf types (like buffers). 
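// A minimal illustration of why owners are visited before the resources they
// own (placeholder types, not the real wgpu-core ones): a view keeps its
// parent texture alive through an `Arc`, so the texture only becomes
// free-able after the view is released, and triaging views before textures
// catches both in a single pass.
//
use std::sync::Arc;

struct Texture;
struct TextureView {
    parent: Arc<Texture>,
}

fn main() {
    let texture = Arc::new(Texture);
    let view = Arc::new(TextureView { parent: texture.clone() });

    assert_eq!(Arc::strong_count(&texture), 2); // still owned by the view
    drop(view);                                 // triage the owner first...
    assert_eq!(Arc::strong_count(&texture), 1); // ...now the texture itself is ready to free
}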
+ /// + /// [`Device`]: super::Device + /// [`self.suspected_resources`]: LifetimeTracker::suspected_resources + /// [`last_resources`]: ActiveSubmission::last_resources + /// [`self.active`]: LifetimeTracker::active + /// [`triage_submissions`]: LifetimeTracker::triage_submissions + /// [`self.free_resources`]: LifetimeTracker::free_resources + pub(crate) fn triage_suspected( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] mut trace: Option<&mut trace::Trace>, + ) { + profiling::scope!("triage_suspected"); + + self.triage_suspected_render_bundles(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderBundle(*_id)); } - } + }); + self.triage_suspected_bind_groups(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroup(*_id)); + } + }); + self.triage_suspected_texture_views(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyTextureView(*_id)); + } + }); + self.triage_suspected_textures(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyTexture(*_id)); + } + }); + self.triage_suspected_samplers(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroySampler(*_id)); + } + }); + self.triage_suspected_buffers(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBuffer(*_id)); + } + }); + self.triage_suspected_compute_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyComputePipeline(*_id)); + } + }); + self.triage_suspected_render_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderPipeline(*_id)); + } + }); + self.triage_suspected_pipeline_layouts(hub, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyPipelineLayout(*_id)); + } + }); + self.triage_suspected_bind_group_layouts(hub, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroupLayout(*_id)); + } + }); + self.triage_suspected_query_sets(hub, trackers); } /// Determine which buffers are ready to map, and which must wait for the diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index de2babf414..8d72f5114d 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1174,7 +1174,11 @@ impl Global { unsafe { device.raw().unmap_buffer(raw_buf) } .map_err(DeviceError::from)?; } - device.temp_suspected.lock().buffers.push(buffer.clone()); + device + .temp_suspected + .lock() + .buffers + .insert(id.0, buffer.clone()); } else { match *buffer.map_state.lock() { BufferMapState::Idle => (), @@ -1196,7 +1200,11 @@ impl Global { }; texture.info.use_at(submit_index); if texture.is_unique() { - device.temp_suspected.lock().textures.push(texture.clone()); + device + .temp_suspected + .lock() + .textures + .insert(id.0, texture.clone()); } if should_extend { unsafe { @@ -1215,11 +1223,10 @@ impl Global { for texture_view in cmd_buf_trackers.views.used_resources() { texture_view.info.use_at(submit_index); if texture_view.is_unique() { - device - .temp_suspected - .lock() - .texture_views - .push(texture_view.clone()); + 
device.temp_suspected.lock().texture_views.insert( + texture_view.as_info().id().0, + texture_view.clone(), + ); } } { @@ -1238,7 +1245,11 @@ impl Global { sampler_guard[sub_id].info.use_at(submit_index); } if bg.is_unique() { - device.temp_suspected.lock().bind_groups.push(bg.clone()); + device + .temp_suspected + .lock() + .bind_groups + .insert(bg.as_info().id().0, bg.clone()); } } } @@ -1248,11 +1259,10 @@ impl Global { { compute_pipeline.info.use_at(submit_index); if compute_pipeline.is_unique() { - device - .temp_suspected - .lock() - .compute_pipelines - .push(compute_pipeline.clone()); + device.temp_suspected.lock().compute_pipelines.insert( + compute_pipeline.as_info().id().0, + compute_pipeline.clone(), + ); } } for render_pipeline in @@ -1260,11 +1270,10 @@ impl Global { { render_pipeline.info.use_at(submit_index); if render_pipeline.is_unique() { - device - .temp_suspected - .lock() - .render_pipelines - .push(render_pipeline.clone()); + device.temp_suspected.lock().render_pipelines.insert( + render_pipeline.as_info().id().0, + render_pipeline.clone(), + ); } } for query_set in cmd_buf_trackers.query_sets.used_resources() { @@ -1274,7 +1283,7 @@ impl Global { .temp_suspected .lock() .query_sets - .push(query_set.clone()); + .insert(query_set.as_info().id().0, query_set.clone()); } } for bundle in cmd_buf_trackers.bundles.used_resources() { @@ -1294,7 +1303,7 @@ impl Global { .temp_suspected .lock() .render_bundles - .push(bundle.clone()); + .insert(bundle.as_info().id().0, bundle.clone()); } } } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 26ded0d598..ffb4f033d5 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -350,42 +350,58 @@ impl Device { { for resource in trackers.buffers.used_resources() { if resource.is_unique() { - temp_suspected.buffers.push(resource.clone()); + temp_suspected + .buffers + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.textures.used_resources() { if resource.is_unique() { - temp_suspected.textures.push(resource.clone()); + temp_suspected + .textures + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.views.used_resources() { if resource.is_unique() { - temp_suspected.texture_views.push(resource.clone()); + temp_suspected + .texture_views + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.bind_groups.used_resources() { if resource.is_unique() { - temp_suspected.bind_groups.push(resource.clone()); + temp_suspected + .bind_groups + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.samplers.used_resources() { if resource.is_unique() { - temp_suspected.samplers.push(resource.clone()); + temp_suspected + .samplers + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.compute_pipelines.used_resources() { if resource.is_unique() { - temp_suspected.compute_pipelines.push(resource.clone()); + temp_suspected + .compute_pipelines + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.render_pipelines.used_resources() { if resource.is_unique() { - temp_suspected.render_pipelines.push(resource.clone()); + temp_suspected + .render_pipelines + .insert(resource.as_info().id().0, resource.clone()); } } for resource in trackers.query_sets.used_resources() { if resource.is_unique() { - temp_suspected.query_sets.push(resource.clone()); + temp_suspected + .query_sets + 
.insert(resource.as_info().id().0, resource.clone()); } } } From 7b28fe4e93b69d0aeff94290c97fe0d501f11c5a Mon Sep 17 00:00:00 2001 From: gents83 Date: Wed, 2 Aug 2023 09:53:31 +0200 Subject: [PATCH 070/132] Adding proper release of surface texture view --- wgpu-core/src/device/global.rs | 10 +++++++++- wgpu-core/src/device/life.rs | 5 ----- wgpu-core/src/present.rs | 5 ++++- wgpu-core/src/resource.rs | 11 +++++++---- 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index eb5688afe4..a6fc5cb44c 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -784,7 +784,15 @@ impl Global { ) { resource::TextureClearMode::BufferCopy => SmallVec::new(), resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views, - resource::TextureClearMode::Surface { .. } => SmallVec::new(), + resource::TextureClearMode::Surface { mut clear_view } => { + if let Some(view) = clear_view.take() { + unsafe { + use hal::Device; + device.raw().destroy_texture_view(view); + } + } + SmallVec::new() + } resource::TextureClearMode::None => SmallVec::new(), }; diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 4ae9209af9..d56be6f23d 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -597,11 +597,6 @@ impl LifetimeTracker { let mut trackers = trackers.lock(); trackers.views.remove_abandoned(id) }; - println!( - "TextureView {:?} examined with refcount {}", - id, - Arc::strong_count(view) - ); if is_removed { log::info!("TextureView {:?} is removed from registry", id); f(view_id); diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 1d367a8f03..751ac4aa52 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -310,7 +310,10 @@ impl Global { } = *clear_mode { let view = clear_view.take().unwrap(); - drop(view); + unsafe { + use hal::Device; + device.raw().destroy_texture_view(view); + } } let suf = A::get_surface(&surface); diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 28c2c38ba3..197cd3bb5d 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -83,10 +83,10 @@ impl ResourceInfo { let mut label = String::new(); #[cfg(debug_assertions)] { - label = self.label.clone(); + label = format!("[{}] ", self.label); } if let Some(id) = self.id.as_ref() { - label = format!("{:?}", id); + label.push_str(format!("{:?}", id).as_str()); } label } @@ -568,8 +568,11 @@ impl Drop for Texture { TextureClearMode::Surface { ref mut clear_view, .. 
} => { - let view = clear_view.take(); - drop(view); + if let Some(view) = clear_view.take() { + unsafe { + self.device.raw().destroy_texture_view(view); + } + } } TextureClearMode::RenderPass { ref mut clear_views, From 481c9144e2d6c3fe0d267da9d6f2cd3d4485ef48 Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 17 Aug 2023 10:58:37 +0200 Subject: [PATCH 071/132] Fix integration issues --- CHANGELOG.md | 4 +-- deno_webgpu/error.rs | 4 +-- wgpu-core/src/command/compute.rs | 7 ++--- wgpu-core/src/command/query.rs | 7 +++-- wgpu-core/src/command/render.rs | 51 ++++++++++++++++---------------- wgpu-core/src/device/life.rs | 2 +- 6 files changed, 37 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63cf2da5f6..4c3495c800 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,8 @@ Bottom level categories: ## Unreleased ### Major changes +- Arcanization of wgpu core resources: Removed 'Token' and 'LifeTime' related management, +removed 'RefCount' and 'MultiRefCount' in favour of using only 'Arc' internal reference count, removing mut from resources and added instead internal members locks on demand or atomics operations, resources now implement Drop and destroy stuff when last 'Arc' resources is released, resources hold an 'Arc' in order to be able to implement Drop, resources have an utility to retrieve the id of the resource itself, removed all guards and just retrive the 'Arc' needed on-demand to unlock registry of resources asap removing locking from hot paths. By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626) and tnx also to @jimblandy #### Pass timestamp queries @@ -140,8 +142,6 @@ By @fornwall in [#3904](https://github.com/gfx-rs/wgpu/pull/3904) and [#3905](ht - Change `AdapterInfo::{device,vendor}` to be `u32` instead of `usize`. By @ameknite in [#3760](https://github.com/gfx-rs/wgpu/pull/3760) - Remove the `backend_bits` parameter in `initialize_adapter_from_env` and `initialize_adapter_from_env_or_default` - use [InstanceDescriptor::backends](https://docs.rs/wgpu/latest/wgpu/struct.InstanceDescriptor.html#structfield.backends) instead. By @fornwall in [#3904](https://github.com/gfx-rs/wgpu/pull/3904) -- Arcanization of wgpu core resources: Removed 'Token' and 'LifeTime' related management, -removed 'RefCount' and 'MultiRefCount' in favour of using only 'Arc' internal reference count, removing mut from resources and added instead internal members locks on demand or atomics operations, resources now implement Drop and destroy stuff when last 'Arc' resources is released, resources hold an 'Arc' in order to be able to implement Drop, resources have an utility to retrieve the id of the resource itself, removed all guards and just retrive the 'Arc' needed on-demand to unlock registry of resources asap removing locking from hot paths. 
By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626) and tnx also to @jimblandy #### DX12 diff --git a/deno_webgpu/error.rs b/deno_webgpu/error.rs index a68592adfc..6c509a80d3 100644 --- a/deno_webgpu/error.rs +++ b/deno_webgpu/error.rs @@ -104,9 +104,7 @@ impl From for WebGpuError { match err { DeviceError::Lost => WebGpuError::Lost, DeviceError::OutOfMemory => WebGpuError::OutOfMemory, - DeviceError::ResourceCreationFailed | DeviceError::Invalid => { - WebGpuError::Validation(fmt_err(&err)) - } + _ => WebGpuError::Validation(fmt_err(&err)), } } } diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index b9ac20750e..342e1da138 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -415,8 +415,7 @@ impl Global { let mut active_query = None; let timestamp_writes = if let Some(tw) = timestamp_writes { - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, tw.query_set) .ok_or(ComputePassErrorInner::InvalidQuerySet(tw.query_set)) @@ -437,12 +436,12 @@ impl Global { // But no point in erroring over that nuance here! if let Some(range) = range { unsafe { - raw.reset_queries(&query_set.raw, range); + raw.reset_queries(query_set.raw.as_ref().unwrap(), range); } } Some(hal::ComputePassTimestampWrites { - query_set: &query_set.raw, + query_set: query_set.raw.as_ref().unwrap(), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }) diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index e7b5ad5ed6..d3254ab5bc 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -18,7 +18,7 @@ use thiserror::Error; use wgt::BufferAddress; #[derive(Debug)] -pub(super) struct QueryResetMap { +pub(crate) struct QueryResetMap { map: FastHashMap, Epoch)>, _phantom: PhantomData, } @@ -265,7 +265,8 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder + .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); } raw_encoder.begin_query(query_set, query_index); } @@ -317,7 +318,7 @@ pub(super) fn end_occlusion_query( // We can unwrap here as the validity was validated when the active query was set let query_set = storage.get(query_set_id).unwrap(); - unsafe { raw_encoder.end_query(&query_set.raw, query_index) }; + unsafe { raw_encoder.end_query(query_set.raw.as_ref().unwrap(), query_index) }; Ok(()) } else { diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 0ba4208cba..cbaa868c85 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -46,6 +46,7 @@ use std::{borrow::Cow, fmt, iter, marker::PhantomData, mem, num::NonZeroU32, ops use super::{ memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions, CommandEncoder, + QueryResetMap, }; /// Operation to perform to the output attachment at the start of a renderpass. 
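// Background for the signature change in the next hunk: taking the encoder,
// trackers, texture memory actions and pending query resets as separate
// `&mut` parameters, instead of one `&mut` to the whole command-buffer
// state, lets the caller hand out disjoint mutable borrows of a single
// struct. A minimal sketch with made-up types (not the wgpu-core ones):

struct RecordingState {
    trackers: Vec<u32>,
    pending_query_resets: Vec<u32>,
}

fn start_pass(trackers: &mut Vec<u32>, pending_query_resets: &mut Vec<u32>) {
    trackers.push(0);
    pending_query_resets.push(0);
}

fn record(state: &mut RecordingState) {
    // Destructuring splits the borrow; passing `state` twice would not compile.
    let RecordingState { trackers, pending_query_resets } = state;
    start_pass(trackers, pending_query_resets);
}

fn main() {
    let mut state = RecordingState { trackers: Vec::new(), pending_query_resets: Vec::new() };
    record(&mut state);
}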
@@ -760,7 +761,10 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>, timestamp_writes: Option<&RenderPassTimestampWrites>, occlusion_query_set: Option, - cmd_buf: &mut CommandBufferMutable, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, + pending_query_resets: &mut QueryResetMap, view_guard: &'a Storage, id::TextureViewId>, buffer_guard: &'a Storage, id::BufferId>, texture_guard: &'a Storage, id::TextureId>, @@ -854,7 +858,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut depth_stencil = None; if let Some(at) = depth_stencil_attachment { - let view: &TextureView = tracker + let view: &TextureView = trackers .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -991,7 +995,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { colors.push(None); continue; }; - let color_view: &TextureView = tracker + let color_view: &TextureView = trackers .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -1026,7 +1030,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut hal_resolve_target = None; if let Some(resolve_target) = at.resolve_target { - let resolve_view: &TextureView = tracker + let resolve_view: &TextureView = trackers .views .add_single(view_guard, resolve_target) .ok_or(RenderPassErrorInner::InvalidAttachment(resolve_target))?; @@ -1130,25 +1134,20 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }; let timestamp_writes = if let Some(tw) = timestamp_writes { - let query_set = cmd_buf - .trackers + let query_set = trackers .query_sets .add_single(query_set_guard, tw.query_set) .ok_or(RenderPassErrorInner::InvalidQuerySet(tw.query_set))?; if let Some(index) = tw.beginning_of_pass_write_index { - cmd_buf - .pending_query_resets - .use_query_set(tw.query_set, query_set, index); + pending_query_resets.use_query_set(tw.query_set, query_set, index); } if let Some(index) = tw.end_of_pass_write_index { - cmd_buf - .pending_query_resets - .use_query_set(tw.query_set, query_set, index); + pending_query_resets.use_query_set(tw.query_set, query_set, index); } Some(hal::RenderPassTimestampWrites { - query_set: &query_set.raw, + query_set: query_set.raw.as_ref().unwrap(), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }) @@ -1157,13 +1156,12 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }; let occlusion_query_set = if let Some(occlusion_query_set) = occlusion_query_set { - let query_set = cmd_buf - .trackers + let query_set = trackers .query_sets .add_single(query_set_guard, occlusion_query_set) .ok_or(RenderPassErrorInner::InvalidQuerySet(occlusion_query_set))?; - Some(&query_set.raw) + Some(query_set.raw.as_ref().unwrap()) } else { None }; @@ -1309,7 +1307,7 @@ impl Global { let hub = A::hub(self); - let (scope, query_reset_state, pending_discard_init_fixups) = { + let (scope, pending_discard_init_fixups) = { let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -1330,6 +1328,7 @@ impl Global { let tracker = &mut cmd_buf_data.trackers; let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + let pending_query_resets = &mut cmd_buf_data.pending_query_resets; // We 
automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, @@ -1361,7 +1360,10 @@ impl Global { depth_stencil_attachment, timestamp_writes, occlusion_query_set_id, - &mut cmd_buf_data, + encoder, + tracker, + texture_memory_actions, + pending_query_resets, &*view_guard, &*buffer_guard, &*texture_guard, @@ -2124,7 +2126,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), ) .map_pass_err(scope)?; } @@ -2135,8 +2137,7 @@ impl Global { .ok_or(RenderPassErrorInner::MissingOcclusionQuerySet) .map_pass_err(scope)?; - let query_set = cmd_buf - .trackers + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2147,7 +2148,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), &mut active_query, ) .map_pass_err(scope)?; @@ -2175,7 +2176,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), &mut active_query, ) .map_pass_err(scope)?; @@ -2270,7 +2271,7 @@ impl Global { info.finish(raw, &*texture_guard).map_pass_err(init_scope)?; encoder.close(); - (trackers, query_reset_state, pending_discard_init_fixups) + (trackers, pending_discard_init_fixups) }; let query_set_guard = hub.query_sets.read(); @@ -2296,7 +2297,7 @@ impl Global { &cmd_buf.device, ); - cmd_buf + cmd_buf_data .pending_query_resets .reset_queries( transit, diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 290f0d737b..6714aa7769 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -305,7 +305,7 @@ pub(crate) struct LifetimeTracker { /// Buffers the user has asked us to map, and which are not used by any /// queue submission still in flight. ready_to_map: Vec>>, - + /// Queue "on_submitted_work_done" closures that were initiated for while there is no /// currently pending submissions. 
These cannot be immeidately invoked as they /// must happen _after_ all mapped buffer callbacks are mapped, so we defer them From 1312b26f7de0b4091a1cb8b9853fbe5b5fd9d65b Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 17 Aug 2023 12:01:27 +0200 Subject: [PATCH 072/132] Using always is_unique() --- wgpu-core/src/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index c978120dc9..6bacaef287 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -122,7 +122,7 @@ where pub(crate) fn is_unique(&self, id: I) -> Result { let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(Arc::strong_count(v) == 1), epoch), + Some(&Element::Occupied(ref v, epoch)) => (Ok(v.is_unique()), epoch), Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), None => return Err(InvalidId), From 6690fe47ae26a64a255f32fd62719af2c03d5e27 Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 18 Aug 2023 17:42:36 +0200 Subject: [PATCH 073/132] Queue is now keeping device alive till needed --- deno_webgpu/lib.rs | 3 +- player/src/bin/play.rs | 3 +- player/tests/test.rs | 17 ++-- tests/tests/device.rs | 32 +++++++ wgpu-core/src/device/global.rs | 71 +++++++--------- wgpu-core/src/device/mod.rs | 2 + wgpu-core/src/device/queue.rs | 138 ++++++++++++++++++++----------- wgpu-core/src/device/resource.rs | 31 ++++--- wgpu-core/src/global.rs | 2 +- wgpu-core/src/hub.rs | 9 +- wgpu-core/src/instance.rs | 107 +++++++++++++++--------- wgpu-core/src/present.rs | 8 +- wgpu-core/src/registry.rs | 5 +- wgpu-core/src/storage.rs | 9 +- wgpu/src/backend/direct.rs | 15 +++- wgpu/src/backend/web.rs | 4 + wgpu/src/context.rs | 8 ++ wgpu/src/lib.rs | 8 ++ 18 files changed, 304 insertions(+), 168 deletions(-) diff --git a/deno_webgpu/lib.rs b/deno_webgpu/lib.rs index 92a6a51334..fff502ebe2 100644 --- a/deno_webgpu/lib.rs +++ b/deno_webgpu/lib.rs @@ -649,10 +649,11 @@ pub async fn op_webgpu_request_device( limits: required_limits.unwrap_or_default(), }; - let (device, maybe_err) = gfx_select!(adapter => instance.adapter_request_device( + let (device, _queue, maybe_err) = gfx_select!(adapter => instance.adapter_request_device( adapter, &descriptor, std::env::var("DENO_WEBGPU_TRACE").ok().as_ref().map(std::path::Path::new), + (), () )); if let Some(err) = maybe_err { diff --git a/player/src/bin/play.rs b/player/src/bin/play.rs index a9a47ce5ff..ce0b4c3bd4 100644 --- a/player/src/bin/play.rs +++ b/player/src/bin/play.rs @@ -81,10 +81,11 @@ fn main() { let info = gfx_select!(adapter => global.adapter_get_info(adapter)).unwrap(); log::info!("Picked '{}'", info.name); let id = wgc::id::TypedId::zip(1, 0, backend); - let (_, error) = gfx_select!(adapter => global.adapter_request_device( + let (_, _, error) = gfx_select!(adapter => global.adapter_request_device( adapter, &desc, None, + id, id )); if let Some(e) = error { diff --git a/player/tests/test.rs b/player/tests/test.rs index cd1302777e..2cfa030101 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -84,8 +84,8 @@ impl Test<'_> { test_num: u32, ) { let backend = adapter.backend(); - let device = wgc::id::TypedId::zip(test_num, 0, backend); - let (_, error) = wgc::gfx_select!(adapter => global.adapter_request_device( + let device_id = wgc::id::TypedId::zip(test_num, 0, backend); + let (_, _, error) = 
wgc::gfx_select!(adapter => global.adapter_request_device( adapter, &wgt::DeviceDescriptor { label: None, @@ -93,7 +93,8 @@ impl Test<'_> { limits: wgt::Limits::default(), }, None, - device + device_id, + device_id )); if let Some(e) = error { panic!("{:?}", e); @@ -102,12 +103,12 @@ impl Test<'_> { let mut command_buffer_id_manager = wgc::identity::IdentityManager::default(); println!("\t\t\tRunning..."); for action in self.actions { - wgc::gfx_select!(device => global.process(device, action, dir, &mut command_buffer_id_manager)); + wgc::gfx_select!(device_id => global.process(device_id, action, dir, &mut command_buffer_id_manager)); } println!("\t\t\tMapping..."); for expect in &self.expectations { let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend); - wgc::gfx_select!(device => global.buffer_map_async( + wgc::gfx_select!(device_id => global.buffer_map_async( buffer, expect.offset .. expect.offset+expect.data.len() as wgt::BufferAddress, wgc::resource::BufferMapOperation { @@ -121,13 +122,13 @@ impl Test<'_> { } println!("\t\t\tWaiting..."); - wgc::gfx_select!(device => global.device_poll(device, wgt::Maintain::Wait)).unwrap(); + wgc::gfx_select!(device_id => global.device_poll(device_id, wgt::Maintain::Wait)).unwrap(); for expect in self.expectations { println!("\t\t\tChecking {}", expect.name); let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend); let (ptr, size) = - wgc::gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, Some(expect.data.len() as wgt::BufferAddress))) + wgc::gfx_select!(device_id => global.buffer_get_mapped_range(buffer, expect.offset, Some(expect.data.len() as wgt::BufferAddress))) .unwrap(); let contents = unsafe { slice::from_raw_parts(ptr, size as usize) }; let expected_data = match expect.data { @@ -155,7 +156,7 @@ impl Test<'_> { } } - wgc::gfx_select!(device => global.clear_backend(())); + wgc::gfx_select!(device_id => global.clear_backend(())); } } diff --git a/tests/tests/device.rs b/tests/tests/device.rs index 945d5476d7..9b2fd4266c 100644 --- a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -36,3 +36,35 @@ fn device_mismatch() { ctx.device.poll(wgpu::Maintain::Poll); }); } + +#[test] +fn device_lifetime_check() { + use pollster::FutureExt as _; + + env_logger::init(); + let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { + backends: wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()), + dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(), + gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(), + }); + + let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, None) + .block_on() + .expect("failed to create adapter"); + + let (device, queue) = adapter + .request_device(&wgpu::DeviceDescriptor::default(), None) + .block_on() + .expect("failed to create device"); + + instance.poll_all(false); + + let pre_report = instance.generate_report(); + + drop(queue); + drop(device); + + let post_report = instance.generate_report(); + + assert_ne!(pre_report, post_report, "Queue and Device has not been dropped as expected"); +} diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index a6fc5cb44c..2573cea0ea 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -5,12 +5,12 @@ use crate::{ device::{life::WaitIdleError, map_buffer, queue, Device, DeviceError, HostMap}, global::Global, hal_api::HalApi, - id::{self, 
AdapterId, DeviceId, SurfaceId}, + id::{self, AdapterId, DeviceId, QueueId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, instance::{self, Adapter, Surface}, pipeline, present, - resource::{self, Buffer, BufferAccessResult, BufferMapState, Resource}, + resource::{self, Buffer, BufferAccessResult, BufferMapState}, resource::{BufferAccessError, BufferMapOperation}, validation::check_buffer_usage, FastHashMap, Label, LabelHelpers as _, @@ -1409,12 +1409,16 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid, }; + let queue = match hub.queues.get(device.queue_id.read().unwrap()) { + Ok(queue) => queue, + Err(_) => break DeviceError::InvalidQueueId, + }; let encoder = match device .command_allocator .lock() .as_mut() .unwrap() - .acquire_encoder(device.raw(), device.queue.as_ref().unwrap()) + .acquire_encoder(device.raw(), queue.raw.as_ref().unwrap()) { Ok(raw) => raw, Err(_) => break DeviceError::OutOfMemory, @@ -2181,12 +2185,11 @@ impl Global { profiling::scope!("poll_device"); let hub = A::hub(self); - let mut devices_to_drop = vec![]; let mut all_queue_empty = true; { let device_guard = hub.devices.read(); - for (id, device) in device_guard.iter(A::VARIANT) { + for (_id, device) in device_guard.iter(A::VARIANT) { let maintain = if force_wait { wgt::Maintain::Wait } else { @@ -2197,19 +2200,10 @@ impl Global { let (cbs, queue_empty) = device.maintain(hub, fence, maintain)?; all_queue_empty = all_queue_empty && queue_empty; - // If the device's own `RefCount` is the only one left, and - // its submission queue is empty, then it can be freed. - if queue_empty && device.is_unique() { - devices_to_drop.push(id); - } closures.extend(cbs); } } - for device_id in devices_to_drop { - self.exit_device::(device_id); - } - Ok(all_queue_empty) } @@ -2275,40 +2269,31 @@ impl Global { pub fn device_drop(&self, device_id: DeviceId) { profiling::scope!("Device::drop"); log::debug!("Device {:?} is asked to be dropped", device_id); - } - /// Exit the unreferenced, inactive device `device_id`. - fn exit_device(&self, device_id: DeviceId) { let hub = A::hub(self); - let mut free_adapter_id = None; - { - let device = hub.devices.unregister(device_id); - if let Some(device) = device { - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. - debug_assert!(device.lock_life().queue_empty()); - { - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - pending_writes.deactivate(); - } - - // Adapter is only referenced by the device and itself. - // This isn't a robust way to destroy them, we should find a better one. - // Check the refcount here should 2 -> registry and device - if device.adapter.ref_count() == 2 { - free_adapter_id = Some(device.adapter.info.id().0); - } - - drop(device); + if let Some(device) = hub.devices.unregister(device_id) { + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. We know we were + // just polled, so `life_tracker.free_resources` is empty. 
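// The ownership shape behind this change: the queue now holds an `Arc` to
// its device, so dropping the user's device handle no longer tears the
// device down while the queue can still submit work. A sketch with
// placeholder types (the real `Queue` also owns the raw HAL queue):
use std::sync::Arc;

struct Device;
impl Drop for Device {
    fn drop(&mut self) {
        // In wgpu-core this is where the raw HAL device would be destroyed.
        println!("device destroyed");
    }
}

struct Queue {
    device: Option<Arc<Device>>,
}

fn main() {
    let device = Arc::new(Device);
    let queue = Queue { device: Some(device.clone()) };
    drop(device); // the user's device handle is gone, but the device is still alive...
    drop(queue);  // ...and is only destroyed once the queue goes away too
}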
+ debug_assert!(device.lock_life().queue_empty()); + { + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + pending_writes.deactivate(); } + + drop(device); } + } - // Free the adapter now that we've dropped the `Device`. - if let Some(free_adapter_id) = free_adapter_id { - let _ = hub.adapters.unregister(free_adapter_id); + pub fn queue_drop(&self, queue_id: QueueId) { + profiling::scope!("Queue::drop"); + log::debug!("Queue {:?} is asked to be dropped", queue_id); + + let hub = A::hub(self); + if let Some(queue) = hub.queues.unregister(queue_id) { + drop(queue); } } diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 8f1232f215..2e73f2e665 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -302,6 +302,8 @@ pub enum DeviceError { OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, + #[error("QueueId is invalid")] + InvalidQueueId, } impl From for DeviceError { diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 489d56279c..b150511fec 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -10,7 +10,7 @@ use crate::{ get_lowest_common_denom, global::Global, hal_api::HalApi, - id, + id::{self, QueueId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::{has_copy_partial_init_tracker_coverage, BufferInitTracker, TextureInitRange}, resource::{ @@ -31,6 +31,31 @@ use thiserror::Error; use super::Device; +pub struct Queue { + pub device: Option>>, + pub raw: Option, + pub info: ResourceInfo, +} + +impl Resource for Queue { + const TYPE: &'static str = "Queue"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } +} + +impl Drop for Queue { + fn drop(&mut self) { + let queue = self.raw.take().unwrap(); + self.device.as_ref().unwrap().release_queue(queue); + } +} + /// Number of command buffers that we generate from the same pool /// for the write_xxx commands, before the pool is recycled. /// @@ -115,7 +140,7 @@ impl SubmittedWorkDoneClosure { #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct WrappedSubmissionIndex { - pub queue_id: id::QueueId, + pub queue_id: QueueId, pub index: SubmissionIndex, } @@ -365,7 +390,7 @@ pub enum QueueSubmitError { impl Global { pub fn queue_write_buffer( &self, - queue_id: id::QueueId, + queue_id: QueueId, buffer_id: id::BufferId, buffer_offset: wgt::BufferAddress, data: &[u8], @@ -374,10 +399,12 @@ impl Global { let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); let data_size = data.len() as wgt::BufferAddress; @@ -400,7 +427,7 @@ impl Global { // Platform validation requires that the staging buffer always be // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
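// The invariant in the comment above, shown in isolation with illustrative
// types: ownership of the staging buffer must always end up in the
// pending-writes list, whether or not the flush succeeds, so it can be freed
// later instead of leaking on the error path.
struct StagingBuffer;

fn write_via_staging(
    pending_writes: &mut Vec<StagingBuffer>,
    flush: impl FnOnce(&StagingBuffer) -> Result<(), &'static str>,
) -> Result<(), &'static str> {
    let staging_buffer = StagingBuffer;
    let result = flush(&staging_buffer);
    // Consume unconditionally; early-returning before this line would skip it.
    pending_writes.push(staging_buffer);
    result
}

fn main() {
    let mut pending = Vec::new();
    let _ = write_via_staging(&mut pending, |_| Err("flush failed"));
    assert_eq!(pending.len(), 1); // consumed even though the flush failed
}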
- let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(&device, data_size)?; + let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(device, data_size)?; let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); @@ -409,38 +436,40 @@ impl Global { ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); staging_buffer.flush(device.raw()) } { - pending_writes.consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(device, Arc::new(staging_buffer)); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - &device, + device, pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - pending_writes.consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(device, Arc::new(staging_buffer)); result } pub fn queue_create_staging_buffer( &self, - queue_id: id::QueueId, + queue_id: QueueId, buffer_size: wgt::BufferSize, id_in: Input, ) -> Result<(id::StagingBufferId, *mut u8), QueueWriteError> { profiling::scope!("Queue::create_staging_buffer"); let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&device, buffer_size.get())?; + prepare_staging_buffer(device, buffer_size.get())?; let fid = hub.staging_buffers.prepare(id_in); let (id, _) = fid.assign(staging_buffer); @@ -451,7 +480,7 @@ impl Global { pub fn queue_write_staging_buffer( &self, - queue_id: id::QueueId, + queue_id: QueueId, buffer_id: id::BufferId, buffer_offset: wgt::BufferAddress, staging_buffer_id: id::StagingBufferId, @@ -459,10 +488,12 @@ impl Global { profiling::scope!("Queue::write_staging_buffer"); let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); let staging_buffer = hub.staging_buffers.unregister(staging_buffer_id); if staging_buffer.is_none() { @@ -479,25 +510,25 @@ impl Global { // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } { - pending_writes.consume(&device, staging_buffer); + pending_writes.consume(device, staging_buffer); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - &device, + device, pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - pending_writes.consume(&device, staging_buffer); + pending_writes.consume(device, staging_buffer); result } pub fn queue_validate_write_buffer( &self, - _queue_id: id::QueueId, + _queue_id: QueueId, buffer_id: id::BufferId, buffer_offset: u64, buffer_size: u64, @@ -611,7 +642,7 @@ impl Global { pub fn queue_write_texture( &self, - queue_id: id::QueueId, + queue_id: QueueId, destination: &ImageCopyTexture, data: &[u8], data_layout: &wgt::ImageDataLayout, @@ -621,10 +652,12 @@ impl Global { let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -784,7 +817,7 @@ impl Global { // Platform validation requires that the staging buffer always be // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. - let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(&device, stage_size)?; + let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(device, stage_size)?; if stage_bytes_per_row == bytes_per_row { profiling::scope!("copy aligned"); @@ -820,7 +853,7 @@ impl Global { } if let Err(e) = unsafe { staging_buffer.flush(device.raw()) } { - pending_writes.consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(device, Arc::new(staging_buffer)); return Err(e.into()); } @@ -864,7 +897,7 @@ impl Global { } } - pending_writes.consume(&device, Arc::new(staging_buffer)); + pending_writes.consume(device, Arc::new(staging_buffer)); pending_writes .dst_textures .insert(destination.texture, dst.clone()); @@ -875,7 +908,7 @@ impl Global { #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] pub fn queue_copy_external_image_to_texture( &self, - queue_id: id::QueueId, + queue_id: QueueId, source: &wgt::ImageCopyExternalImage, destination: crate::command::ImageCopyTextureTagged, size: wgt::Extent3d, @@ -884,10 +917,12 @@ impl Global { let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 { log::trace!("Ignoring write_texture of size 0"); @@ -1078,7 +1113,7 @@ impl Global { pub fn queue_submit( &self, - queue_id: id::QueueId, + queue_id: QueueId, command_buffer_ids: &[id::CommandBufferId], ) -> Result { profiling::scope!("Queue::submit"); @@ -1086,10 +1121,12 @@ impl Global { let (submit_index, callbacks) = { let hub = A::hub(self); - let device = hub - .devices + let queue = hub + .queues .get(queue_id) - .map_err(|_| DeviceError::Invalid)?; + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); let mut fence = device.fence.write(); let fence = fence.as_mut().unwrap(); @@ -1324,7 +1361,7 @@ impl Global { .initialize_buffer_memory(&mut *trackers, &*buffer_guard) .map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?; baked - 
.initialize_texture_memory(&mut *trackers, &*texture_guard, &device) + .initialize_texture_memory(&mut *trackers, &*texture_guard, device) .map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?; //Note: stateless trackers are not merged: // device already knows these resources exist. @@ -1433,8 +1470,8 @@ impl Global { ) .collect::>(); unsafe { - device - .queue + queue + .raw .as_ref() .unwrap() .submit(&refs, Some((fence, submit_index))) @@ -1445,7 +1482,7 @@ impl Global { if let Some(pending_execution) = pending_writes.post_submit( device.command_allocator.lock().as_mut().unwrap(), device.raw(), - device.queue.as_ref().unwrap(), + queue.raw.as_ref().unwrap(), ) { active_executions.push(pending_execution); } @@ -1486,24 +1523,29 @@ impl Global { pub fn queue_get_timestamp_period( &self, - queue_id: id::QueueId, + queue_id: QueueId, ) -> Result { let hub = A::hub(self); - match hub.devices.get(queue_id) { - Ok(device) => Ok(unsafe { device.queue.as_ref().unwrap().get_timestamp_period() }), + match hub.queues.get(queue_id) { + Ok(queue) => Ok(unsafe { queue.raw.as_ref().unwrap().get_timestamp_period() }), Err(_) => Err(InvalidQueue), } } pub fn queue_on_submitted_work_done( &self, - queue_id: id::QueueId, + queue_id: QueueId, closure: SubmittedWorkDoneClosure, ) -> Result<(), InvalidQueue> { //TODO: flush pending writes let hub = A::hub(self); - match hub.devices.get(queue_id) { - Ok(device) => device.lock_life().add_work_done_closure(closure), + match hub.queues.get(queue_id) { + Ok(queue) => queue + .device + .as_ref() + .unwrap() + .lock_life() + .add_work_done_closure(closure), Err(_) => return Err(InvalidQueue), } Ok(()) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ffb4f033d5..bac6748fa3 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -10,7 +10,7 @@ use crate::{ }, hal_api::HalApi, hub::Hub, - id::{self, DeviceId}, + id::{self, DeviceId, QueueId}, identity::GlobalIdentityHandlerFactory, init_tracker::{ BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, @@ -48,7 +48,8 @@ use std::{ use super::{ life::{self, SuspectedResources}, - queue, DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE, + queue::{self}, + DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE, IMPLICIT_FAILURE, ZERO_BUFFER_SIZE, }; @@ -80,12 +81,13 @@ use super::{ pub struct Device { raw: Option, pub(crate) adapter: Arc>, - pub(crate) queue: Option, + pub(crate) queue_id: RwLock>, + queue_to_drop: RwLock>, pub(crate) zero_buffer: Option, - //Note: The submission index here corresponds to the last submission that is done. pub(crate) info: ResourceInfo, pub(crate) command_allocator: Mutex>>, + //Note: The submission index here corresponds to the last submission that is done. 
pub(crate) active_submission_index: AtomicU64, //SubmissionIndex, pub(crate) fence: RwLock>, @@ -129,7 +131,8 @@ impl Drop for Device { unsafe { raw.destroy_buffer(self.zero_buffer.take().unwrap()); raw.destroy_fence(self.fence.write().take().unwrap()); - raw.exit(self.queue.take().unwrap()); + let queue = self.queue_to_drop.write().take().unwrap(); + raw.exit(queue); } } } @@ -168,7 +171,8 @@ impl Device { impl Device { pub(crate) fn new( - open: hal::OpenDevice, + raw_device: A::Device, + raw_queue: &A::Queue, adapter: &Arc>, alignments: hal::Alignments, downlevel: wgt::DownlevelCapabilities, @@ -180,19 +184,19 @@ impl Device { log::error!("Feature 'trace' is not enabled"); } let fence = - unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; + unsafe { raw_device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; let mut com_alloc = CommandAllocator { free_encoders: Vec::new(), }; let pending_encoder = com_alloc - .acquire_encoder(&open.device, &open.queue) + .acquire_encoder(&raw_device, raw_queue) .map_err(|_| CreateDeviceError::OutOfMemory)?; let mut pending_writes = queue::PendingWrites::::new(pending_encoder); // Create zeroed buffer used for texture clears. let zero_buffer = unsafe { - open.device + raw_device .create_buffer(&hal::BufferDescriptor { label: Some("(wgpu internal) zero init buffer"), size: ZERO_BUFFER_SIZE, @@ -221,9 +225,10 @@ impl Device { } Ok(Self { - raw: Some(open.device), + raw: Some(raw_device), adapter: adapter.clone(), - queue: Some(open.queue), + queue_id: RwLock::new(None), + queue_to_drop: RwLock::new(None), zero_buffer: Some(zero_buffer), info: ResourceInfo::new(""), command_allocator: Mutex::new(Some(com_alloc)), @@ -254,6 +259,10 @@ impl Device { }) } + pub(crate) fn release_queue(&self, queue: A::Queue) { + self.queue_to_drop.write().replace(queue); + } + pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> { self.life_tracker.lock() } diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 5fc1ff352b..3991e4f6f8 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -10,7 +10,7 @@ use crate::{ storage::{Element, StorageReport}, }; -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct GlobalReport { pub surfaces: StorageReport, #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 39af2ef086..f67b111b96 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -152,7 +152,7 @@ flagged as errors as well. 
use crate::{ binding_model::{BindGroup, BindGroupLayout, PipelineLayout}, command::{CommandBuffer, RenderBundle}, - device::Device, + device::{queue::Queue, Device}, hal_api::HalApi, id, identity::GlobalIdentityHandlerFactory, @@ -165,10 +165,11 @@ use crate::{ use std::fmt::Debug; -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct HubReport { pub adapters: StorageReport, pub devices: StorageReport, + pub queues: StorageReport, pub pipeline_layouts: StorageReport, pub shader_modules: StorageReport, pub bind_group_layouts: StorageReport, @@ -218,6 +219,7 @@ impl HubReport { pub struct Hub { pub adapters: Registry, F>, pub devices: Registry, F>, + pub queues: Registry, F>, pub pipeline_layouts: Registry, F>, pub shader_modules: Registry, F>, pub bind_group_layouts: Registry, F>, @@ -239,6 +241,7 @@ impl Hub { Self { adapters: Registry::new(A::VARIANT, factory), devices: Registry::new(A::VARIANT, factory), + queues: Registry::new(A::VARIANT, factory), pipeline_layouts: Registry::new(A::VARIANT, factory), shader_modules: Registry::new(A::VARIANT, factory), bind_group_layouts: Registry::new(A::VARIANT, factory), @@ -308,6 +311,7 @@ impl Hub { } } + self.queues.write().map.clear(); devices.map.clear(); if with_adapters { @@ -332,6 +336,7 @@ impl Hub { HubReport { adapters: self.adapters.generate_report(), devices: self.devices.generate_report(), + queues: self.queues.generate_report(), pipeline_layouts: self.pipeline_layouts.generate_report(), shader_modules: self.shader_modules.generate_report(), bind_group_layouts: self.bind_group_layouts.generate_report(), diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index eb49384ee1..1f57744512 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1,10 +1,10 @@ use std::sync::Arc; use crate::{ - device::{resource::Device, DeviceDescriptor}, + device::{queue::Queue, resource::Device, DeviceDescriptor}, global::Global, hal_api::HalApi, - id::{AdapterId, DeviceId, SurfaceId}, + id::{AdapterId, DeviceId, QueueId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, resource::{Resource, ResourceInfo}, @@ -14,7 +14,7 @@ use crate::{ use parking_lot::Mutex; use wgt::{Backend, Backends, PowerPreference}; -use hal::{Adapter as _, Instance as _}; +use hal::{Adapter as _, Instance as _, OpenDevice}; use thiserror::Error; pub type RequestAdapterOptions = wgt::RequestAdapterOptions; @@ -293,29 +293,37 @@ impl Adapter { } } - fn create_device_from_hal( + fn create_device_and_queue_from_hal( self: &Arc, - open: hal::OpenDevice, + hal_device: OpenDevice, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - ) -> Result, RequestDeviceError> { + ) -> Result<(Device, Queue), RequestDeviceError> { let caps = &self.raw.capabilities; - Device::new( - open, + if let Ok(device) = Device::new( + hal_device.device, + &hal_device.queue, self, caps.alignments.clone(), caps.downlevel.clone(), desc, trace_path, - ) - .or(Err(RequestDeviceError::OutOfMemory)) + ) { + let queue = Queue { + device: None, + raw: Some(hal_device.queue), + info: ResourceInfo::new(""), + }; + return Ok((device, queue)); + } + Err(RequestDeviceError::OutOfMemory) } - fn create_device( + fn create_device_and_queue( self: &Arc, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - ) -> Result, RequestDeviceError> { + ) -> Result<(Device, Queue), RequestDeviceError> { // Verify all features were exposed by the adapter if !self.raw.features.contains(desc.features) { return 
Err(RequestDeviceError::UnsupportedFeature( @@ -364,7 +372,7 @@ impl Adapter { }, )?; - self.create_device_from_hal(open, desc, trace_path) + self.create_device_and_queue_from_hal(open, desc, trace_path) } } @@ -1067,29 +1075,41 @@ impl Global { adapter_id: AdapterId, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - id_in: Input, - ) -> (DeviceId, Option) { + device_id_in: Input, + queue_id_in: Input, + ) -> (DeviceId, QueueId, Option) { profiling::scope!("Adapter::request_device"); let hub = A::hub(self); - let fid = hub.devices.prepare(id_in); + let device_fid = hub.devices.prepare(device_id_in); + let queue_fid = hub.queues.prepare(queue_id_in); let error = loop { let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = match adapter.create_device(desc, trace_path) { - Ok(device) => device, + let (device, mut queue) = match adapter.create_device_and_queue(desc, trace_path) { + Ok((device, queue)) => (device, queue), Err(e) => break e, }; - let (id, _) = fid.assign(device); - log::info!("Created Device {:?}", id); - return (id.0, None); + let (device_id, _) = device_fid.assign(device); + log::info!("Created Device {:?}", device_id); + + let device = hub.devices.get(device_id.0).unwrap(); + queue.device = Some(device.clone()); + + let (queue_id, _) = queue_fid.assign(queue); + log::info!("Created Queue {:?}", queue_id); + + device.queue_id.write().replace(queue_id.0); + + return (device_id.0, queue_id.0, None); }; - let id = fid.assign_error(desc.label.borrow_or_default()); - (id, Some(error)) + let device_id = device_fid.assign_error(desc.label.borrow_or_default()); + let queue_id = queue_fid.assign_error(desc.label.borrow_or_default()); + (device_id, queue_id, Some(error)) } /// # Safety @@ -1099,32 +1119,45 @@ impl Global { pub unsafe fn create_device_from_hal( &self, adapter_id: AdapterId, - hal_device: hal::OpenDevice, + hal_device: OpenDevice, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - id_in: Input, - ) -> (DeviceId, Option) { - profiling::scope!("Adapter::create_device_from_hal"); + device_id_in: Input, + queue_id_in: Input, + ) -> (DeviceId, QueueId, Option) { + profiling::scope!("Global::create_device_from_hal"); let hub = A::hub(self); - let fid = hub.devices.prepare(id_in); + let devices_fid = hub.devices.prepare(device_id_in); + let queues_fid = hub.queues.prepare(queue_id_in); let error = loop { let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = match adapter.create_device_from_hal(hal_device, desc, trace_path) { - Ok(device) => device, - Err(e) => break e, - }; - let (id, _) = fid.assign(device); - log::info!("Created Device {:?}", id); - return (id.0, None); + let (device, mut queue) = + match adapter.create_device_and_queue_from_hal(hal_device, desc, trace_path) { + Ok(device) => device, + Err(e) => break e, + }; + let (device_id, _) = devices_fid.assign(device); + log::info!("Created Device {:?}", device_id); + + let device = hub.devices.get(device_id.0).unwrap(); + queue.device = Some(device.clone()); + + let (queue_id, _) = queues_fid.assign(queue); + log::info!("Created Queue {:?}", queue_id); + + device.queue_id.write().replace(queue_id.0); + + return (device_id.0, queue_id.0, None); }; - let id = fid.assign_error(desc.label.borrow_or_default()); - (id, Some(error)) + let device_id = devices_fid.assign_error(desc.label.borrow_or_default()); + let 
queue_id = queues_fid.assign_error(desc.label.borrow_or_default()); + (device_id, queue_id, Some(error)) } } diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 751ac4aa52..a244d139ab 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -280,6 +280,8 @@ impl Global { }; let device = hub.devices.get(present.device_id.0).unwrap(); + let queue_id = device.queue_id.read().unwrap(); + let queue = hub.queues.get(queue_id).unwrap(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -332,11 +334,7 @@ impl Global { Err(hal::SurfaceError::Outdated) } else { unsafe { - device - .queue - .as_ref() - .unwrap() - .present(&suf.unwrap().raw, raw) + queue.raw.as_ref().unwrap().present(&suf.unwrap().raw, raw) } } } diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 7d56c4aca0..59bcb7f63a 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -58,8 +58,9 @@ impl> FutureId<'_, I, T> { self.id } - pub fn assign(self, value: T) -> (id::Valid, Arc) { - self.data.write().insert(self.id, value); + pub fn assign(self, mut value: T) -> (id::Valid, Arc) { + value.as_info_mut().set_id(self.id); + self.data.write().insert(self.id, Arc::new(value)); ( id::Valid(self.id), self.data.read().get(self.id).unwrap().clone(), diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index 6bacaef287..a328c4f73f 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -21,7 +21,7 @@ pub(crate) enum Element { Error(Epoch, String), } -#[derive(Clone, Debug, Default)] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] pub struct StorageReport { pub num_occupied: usize, pub num_vacant: usize, @@ -179,10 +179,9 @@ where } } - pub(crate) fn insert(&mut self, id: I, mut value: T) { - let (index, epoch, _) = id.unzip(); - value.as_info_mut().set_id(id); - self.insert_impl(index as usize, Element::Occupied(Arc::new(value), epoch)) + pub(crate) fn insert(&mut self, id: I, value: Arc) { + let (index, epoch, _backend) = id.unzip(); + self.insert_impl(index as usize, Element::Occupied(value, epoch)) } pub(crate) fn insert_error(&mut self, id: I, label: &str) { diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index ec469be986..637500aeb1 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -104,13 +104,14 @@ impl Context { trace_dir: Option<&std::path::Path>, ) -> Result<(Device, Queue), crate::RequestDeviceError> { let global = &self.0; - let (device_id, error) = unsafe { + let (device_id, queue_id, error) = unsafe { global.create_device_from_hal( *adapter, hal_device, &desc.map_label(|l| l.map(Borrowed)), trace_dir, (), + (), ) }; if let Some(err) = error { @@ -123,7 +124,7 @@ impl Context { features: desc.features, }; let queue = Queue { - id: device_id, + id: queue_id, error_sink, }; Ok((device, queue)) @@ -621,10 +622,11 @@ impl crate::Context for Context { trace_dir: Option<&std::path::Path>, ) -> Self::RequestDeviceFuture { let global = &self.0; - let (device_id, error) = wgc::gfx_select!(*adapter => global.adapter_request_device( + let (device_id, queue_id, error) = wgc::gfx_select!(*adapter => global.adapter_request_device( *adapter, &desc.map_label(|l| l.map(Borrowed)), trace_dir, + (), () )); if let Some(err) = error { @@ -638,7 +640,7 @@ impl crate::Context for Context { features: desc.features, }; let queue = Queue { - id: device_id, + id: queue_id, error_sink, }; ready(Ok((device_id, device, device_id, queue))) @@ -1447,6 +1449,11 @@ impl 
crate::Context for Context { wgc::gfx_select!(device => global.device_drop(*device)); } + #[cfg_attr(target_arch = "wasm32", allow(unused))] + fn queue_drop(&self, queue: &Self::QueueId, _device_data: &Self::QueueData) { + let global = &self.0; + wgc::gfx_select!(queue => global.queue_drop(*queue)); + } fn device_poll( &self, device: &Self::DeviceId, diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index 97f5cb945d..72f31b5bab 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -1816,6 +1816,10 @@ impl crate::context::Context for Context { // Device is dropped automatically } + fn queue_drop(&self, _queue: &Self::QueueId, _queue_data: &Self::QueueData) { + // Queue is dropped automatically + } + fn device_poll( &self, _device: &Self::DeviceId, diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index 9d0bdd9100..24465d25d1 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -269,6 +269,7 @@ pub trait Context: Debug + WasmNotSend + WasmNotSync + Sized { desc: &RenderBundleEncoderDescriptor, ) -> (Self::RenderBundleEncoderId, Self::RenderBundleEncoderData); fn device_drop(&self, device: &Self::DeviceId, device_data: &Self::DeviceData); + fn queue_drop(&self, queue: &Self::QueueId, queue_data: &Self::QueueData); fn device_poll( &self, device: &Self::DeviceId, @@ -1363,6 +1364,7 @@ pub(crate) trait DynContext: Debug + WasmNotSend + WasmNotSync { desc: &RenderBundleEncoderDescriptor, ) -> (ObjectId, Box); fn device_drop(&self, device: &ObjectId, device_data: &crate::Data); + fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data); fn device_poll(&self, device: &ObjectId, device_data: &crate::Data, maintain: Maintain) -> bool; fn device_on_uncaptured_error( @@ -2424,6 +2426,12 @@ where Context::device_drop(self, &device, device_data) } + fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data) { + let queue = ::from(*queue); + let queue_data = downcast_ref(queue_data); + Context::queue_drop(self, &queue, queue_data) + } + fn device_poll( &self, device: &ObjectId, diff --git a/wgpu/src/lib.rs b/wgpu/src/lib.rs index 9984dd1bf5..f5eefcff6e 100644 --- a/wgpu/src/lib.rs +++ b/wgpu/src/lib.rs @@ -915,6 +915,14 @@ pub struct Queue { ))] static_assertions::assert_impl_all!(Queue: Send, Sync); +impl Drop for Queue { + fn drop(&mut self) { + if !thread::panicking() { + self.context.queue_drop(&self.id, self.data.as_ref()); + } + } +} + /// Resource that can be bound to a pipeline. 
 ///
 /// Corresponds to [WebGPU `GPUBindingResource`](
From 4b9370290d17a9a775d8d2a623c4dea23309e638 Mon Sep 17 00:00:00 2001
From: gents83
Date: Fri, 18 Aug 2023 17:52:01 +0200
Subject: [PATCH 074/132] RenderBundle has strong reference to its device

---
 wgpu-core/src/command/bundle.rs | 8 ++++----
 wgpu-core/src/device/global.rs  | 4 +---
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs
index 78389ecf95..14aed743e0 100644
--- a/wgpu-core/src/command/bundle.rs
+++ b/wgpu-core/src/command/bundle.rs
@@ -105,7 +105,7 @@ use crate::{
 };

 use arrayvec::ArrayVec;
-use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range};
+use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range, sync::Arc};
 use thiserror::Error;

 use hal::CommandEncoder as _;
@@ -256,7 +256,7 @@ impl RenderBundleEncoder {
     pub(crate) fn finish(
         self,
         desc: &RenderBundleDescriptor,
-        device: &Device,
+        device: &Arc>,
         hub: &Hub,
     ) -> Result, RenderBundleError> {
         let pipeline_layout_guard = hub.pipeline_layouts.read();
@@ -660,7 +660,7 @@ impl RenderBundleEncoder {
             },
             is_depth_read_only: self.is_depth_read_only,
             is_stencil_read_only: self.is_stencil_read_only,
-            device_id: id::Valid(self.parent_id),
+            device: device.clone(),
             used: state.trackers,
             buffer_memory_init_actions,
             texture_memory_init_actions,
@@ -739,7 +739,7 @@ pub struct RenderBundle {
     base: BasePass,
     pub(super) is_depth_read_only: bool,
     pub(super) is_stencil_read_only: bool,
-    pub(crate) device_id: id::Valid,
+    pub(crate) device: Arc>,
     pub(crate) used: RenderBundleScope,
     pub(super) buffer_memory_init_actions: Vec,
     pub(super) texture_memory_init_actions: Vec,
diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs
index 2573cea0ea..11d8a3b274 100644
--- a/wgpu-core/src/device/global.rs
+++ b/wgpu-core/src/device/global.rs
@@ -1551,9 +1551,7 @@ impl Global {
             }
         };

-        hub.devices
-            .get(bundle.device_id.0)
-            .unwrap()
+        bundle.device
             .lock_life()
             .suspected_resources
             .render_bundles
From a483132527d375ff16fd4845ff3bfb1688ef3430 Mon Sep 17 00:00:00 2001
From: gents83
Date: Fri, 18 Aug 2023 18:16:29 +0200
Subject: [PATCH 075/132] Removing check for test on wasm

---
 tests/tests/device.rs | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/tests/tests/device.rs b/tests/tests/device.rs
index 9b2fd4266c..dd1582cf34 100644
--- a/tests/tests/device.rs
+++ b/tests/tests/device.rs
@@ -59,12 +59,27 @@ fn device_lifetime_check() {

     instance.poll_all(false);

+    #[cfg(any(
+        not(target_arch = "wasm32"),
+        target_os = "emscripten",
+        feature = "webgl"
+    ))]
     let pre_report = instance.generate_report();

     drop(queue);
     drop(device);

+    #[cfg(any(
+        not(target_arch = "wasm32"),
+        target_os = "emscripten",
+        feature = "webgl"
+    ))]
     let post_report = instance.generate_report();
-
+    #[cfg(any(
+        not(target_arch = "wasm32"),
+        target_os = "emscripten",
+        feature = "webgl"
+    ))]
     assert_ne!(pre_report, post_report, "Queue and Device has not been dropped as expected");
 }
From 586c1bbca3ce61075d617b0187fb0052c064d14c Mon Sep 17 00:00:00 2001
From: gents83
Date: Fri, 18 Aug 2023 20:26:05 +0200
Subject: [PATCH 076/132] Apply cargo fmt

---
 tests/tests/device.rs          | 7 +++++--
 wgpu-core/src/device/global.rs | 3 ++-
 wgpu/src/backend/direct.rs     | 6 +++---
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/tests/tests/device.rs b/tests/tests/device.rs
index dd1582cf34..57f7439104 100644
--- a/tests/tests/device.rs
+++ b/tests/tests/device.rs
@@ -75,11 +75,14 @@ fn
device_lifetime_check() { feature = "webgl" ))] let post_report = instance.generate_report(); - + #[cfg(any( not(target_arch = "wasm32"), target_os = "emscripten", feature = "webgl" ))] - assert_ne!(pre_report, post_report, "Queue and Device has not been dropped as expected"); + assert_ne!( + pre_report, post_report, + "Queue and Device has not been dropped as expected" + ); } diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 11d8a3b274..8118234949 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1551,7 +1551,8 @@ impl Global { } }; - bundle.device + bundle + .device .lock_life() .suspected_resources .render_bundles diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 637500aeb1..3d3347b589 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -1437,17 +1437,17 @@ impl crate::Context for Context { } #[cfg_attr(target_arch = "wasm32", allow(unused))] fn device_drop(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - let global = &self.0; - + #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] { + let global = &self.0; match wgc::gfx_select!(device => global.device_poll(*device, wgt::Maintain::Wait)) { Ok(_) => (), Err(err) => self.handle_error_fatal(err, "Device::drop"), } + wgc::gfx_select!(device => global.device_drop(*device)); } - wgc::gfx_select!(device => global.device_drop(*device)); } #[cfg_attr(target_arch = "wasm32", allow(unused))] fn queue_drop(&self, queue: &Self::QueueId, _device_data: &Self::QueueData) { From b1757741539896e4e7bb524e1f811605d910cfa9 Mon Sep 17 00:00:00 2001 From: Jim Blandy Date: Fri, 18 Aug 2023 21:28:04 -0700 Subject: [PATCH 077/132] Let Presentation hold a strong reference to its Device. --- wgpu-core/src/device/any_device.rs | 88 ++++++++++++++++++++++++++++++ wgpu-core/src/device/global.rs | 2 +- wgpu-core/src/device/mod.rs | 1 + wgpu-core/src/hal_api.rs | 2 +- wgpu-core/src/hub.rs | 29 +++------- wgpu-core/src/instance.rs | 29 +++++----- wgpu-core/src/present.rs | 26 ++++----- wgpu/src/backend/direct.rs | 2 - 8 files changed, 123 insertions(+), 56 deletions(-) create mode 100644 wgpu-core/src/device/any_device.rs diff --git a/wgpu-core/src/device/any_device.rs b/wgpu-core/src/device/any_device.rs new file mode 100644 index 0000000000..8cbc851939 --- /dev/null +++ b/wgpu-core/src/device/any_device.rs @@ -0,0 +1,88 @@ +use super::Device; +/// The `AnyDevice` type: a pointer to a `Device` for any backend `A`. +use crate::hal_api::HalApi; + +use std::any::Any; +use std::fmt; +use std::sync::Arc; + +/// A pointer to a `Device`, for any backend `A`. +/// +/// Any `AnyDevice` is just like an `Arc>`, except that the +/// `A` type parameter is erased. To access the `Device`, you must +/// downcast to a particular backend with the [`downcast_ref`] or +/// [`downcast_clone`] methods. +pub struct AnyDevice(Arc); + +impl AnyDevice { + /// Return an `AnyDevice` that holds an owning `Arc` pointer to `device`. + pub fn new(device: Arc>) -> AnyDevice { + AnyDevice(device) + } + + /// If `self` is an `Arc>`, return a reference to the + /// device. + pub fn downcast_ref(&self) -> Option<&Device> { + self.0.downcast_ref::>() + } + + /// If `self` is an `Arc>`, return a clone of that. + pub fn downcast_clone(&self) -> Option>> { + // `Arc::downcast` returns `Arc`, but requires that `T` be `Sync` and + // `Send`, and this is not the case for `Device` in wasm builds. 
+ // + // But as far as I can see, `Arc::downcast` has no particular reason to + // require that `T` be `Sync` and `Send`; the steps used here are sound. + if (self.0).is::>() { + // Get an owned Arc. + let clone = self.0.clone(); + // Turn the `Arc`, which is a pointer to an `ArcInner` struct, into + // a pointer to the `ArcInner`'s `data` field. Carry along the + // vtable from the original `Arc`. + let raw_erased: *const (dyn Any + 'static) = Arc::into_raw(clone); + // Remove the vtable, and supply the concrete type of the `data`. + let raw_typed: *const Device = raw_erased.cast::>(); + // Convert the pointer to the `data` field back into a pointer to + // the `ArcInner`, and restore reference-counting behavior. + let arc_typed: Arc> = unsafe { + // Safety: + // - We checked that the `dyn Any` was indeed a `Device` above. + // - We're calling `Arc::from_raw` on the same pointer returned + // by `Arc::into_raw`, except that we stripped off the vtable + // pointer. + // - The pointer must still be live, because we've borrowed `self`, + // which holds another reference to it. + // - The format of a `ArcInner` must be the same as + // that of an `ArcInner>`, or else `AnyDevice::new` + // wouldn't be possible. + Arc::from_raw(raw_typed) + }; + Some(arc_typed) + } else { + None + } + } +} + +impl fmt::Debug for AnyDevice { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AnyDevice") + } +} + +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Send for AnyDevice {} +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Sync for AnyDevice {} diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 8118234949..974a4060ef 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2109,7 +2109,7 @@ impl Global { } let mut presentation = surface.presentation.lock(); *presentation = Some(present::Presentation { - device_id: id::Valid(device_id), + device: super::any_device::AnyDevice::new(device.clone()), config: config.clone(), num_frames, acquired_texture: None, diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 2e73f2e665..10807833f9 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -18,6 +18,7 @@ use wgt::{BufferAddress, TextureFormat}; use std::{iter, num::NonZeroU32, ptr}; +pub mod any_device; pub mod global; mod life; pub mod queue; diff --git a/wgpu-core/src/hal_api.rs b/wgpu-core/src/hal_api.rs index 71151303a4..a95b58f070 100644 --- a/wgpu-core/src/hal_api.rs +++ b/wgpu-core/src/hal_api.rs @@ -7,7 +7,7 @@ use crate::{ instance::{HalSurface, Instance, Surface}, }; -pub trait HalApi: hal::Api { +pub trait HalApi: hal::Api + 'static { const VARIANT: Backend; fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index f67b111b96..f5b2dc20a5 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -291,21 +291,13 @@ impl Hub { for element in surface_guard.map.iter() { if let Element::Occupied(ref surface, _epoch) = *element { - if surface - .presentation - .lock() - .as_ref() - .map_or(wgt::Backend::Empty, |p| p.backend()) - != A::VARIANT - { - continue; - } - if let Some(present) = 
surface.presentation.lock().take() { - let device = &devices[present.device_id]; - let suf = A::get_surface(surface); - unsafe { - suf.unwrap().raw.unconfigure(device.raw()); - //TODO: we could destroy the surface here + if let Some(ref mut present) = *surface.presentation.lock() { + if let Some(device) = present.device.downcast_ref::() { + let suf = A::get_surface(surface); + unsafe { + suf.unwrap().raw.unconfigure(device.raw()); + //TODO: we could destroy the surface here + } } } } @@ -320,12 +312,7 @@ impl Hub { } } - pub(crate) fn surface_unconfigure( - &self, - device_id: id::Valid, - surface: &HalSurface, - ) { - let device = self.devices.get(device_id.0).unwrap(); + pub(crate) fn surface_unconfigure(&self, device: &Device, surface: &HalSurface) { unsafe { use hal::Surface; surface.raw.unconfigure(device.raw()); diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 1f57744512..c97d287f43 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -667,27 +667,24 @@ impl Global { present: &Presentation, ) { let hub = HalApi::hub(global); - hub.surface_unconfigure(present.device_id, surface); + if let Some(device) = present.device.downcast_ref::() { + hub.surface_unconfigure(device, surface); + } } let surface = self.surfaces.unregister(id); if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { if let Some(present) = surface.presentation.lock().take() { - match present.backend() { - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => { - unconfigure(self, surface.vulkan.as_ref().unwrap(), &present) - } - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => unconfigure(self, surface.metal.as_ref().unwrap(), &present), - #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => unconfigure(self, surface.dx12.as_ref().unwrap(), &present), - #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => unconfigure(self, surface.dx11.as_ref().unwrap(), &present), - #[cfg(feature = "gles")] - Backend::Gl => unconfigure(self, surface.gl.as_ref().unwrap(), &present), - _ => unreachable!(), - } + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + unconfigure(self, surface.vulkan.as_ref().unwrap(), &present); + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + unconfigure(self, surface.metal.as_ref().unwrap(), &present); + #[cfg(all(feature = "dx12", windows))] + unconfigure(self, surface.dx12.as_ref().unwrap(), &present); + #[cfg(all(feature = "dx11", windows))] + unconfigure(self, surface.dx11.as_ref().unwrap(), &present); + #[cfg(feature = "gles")] + unconfigure(self, surface.gl.as_ref().unwrap(), &present); } self.instance.destroy_surface(surface); diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index a244d139ab..d8b6fc356d 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -21,10 +21,11 @@ use std::{ use crate::device::trace::Action; use crate::{ conv, + device::any_device::AnyDevice, device::{DeviceError, MissingDownlevelFlags}, global::Global, hal_api::HalApi, - id::{DeviceId, SurfaceId, TextureId, Valid}, + id::{SurfaceId, TextureId, Valid}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, resource::{self, ResourceInfo}, @@ -41,19 +42,13 @@ pub const DESIRED_NUM_FRAMES: u32 = 3; #[derive(Debug)] pub(crate) struct Presentation { - pub(crate) device_id: Valid, + pub(crate) device: AnyDevice, pub(crate) config: wgt::SurfaceConfiguration>, #[allow(unused)] pub(crate) num_frames: 
u32, pub(crate) acquired_texture: Option>, } -impl Presentation { - pub(crate) fn backend(&self) -> wgt::Backend { - crate::id::TypedId::unzip(self.device_id.0).2 - } -} - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum SurfaceError { @@ -129,12 +124,13 @@ impl Global { .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (device, config) = match surface.presentation.lock().as_ref() { - Some(present) => { - let device = hub.devices.get(present.device_id.0).unwrap(); - (device, present.config.clone()) + let (device, config) = if let Some(ref present) = *surface.presentation.lock() { + match present.device.downcast_clone::() { + Some(device) => (device, present.config.clone()), + None => return Err(SurfaceError::NotConfigured), } - None => return Err(SurfaceError::NotConfigured), + } else { + return Err(SurfaceError::NotConfigured); }; #[cfg(feature = "trace")] @@ -279,7 +275,7 @@ impl Global { None => return Err(SurfaceError::NotConfigured), }; - let device = hub.devices.get(present.device_id.0).unwrap(); + let device = present.device.downcast_ref::().unwrap(); let queue_id = device.queue_id.read().unwrap(); let queue = hub.queues.get(queue_id).unwrap(); @@ -384,7 +380,7 @@ impl Global { None => return Err(SurfaceError::NotConfigured), }; - let device = hub.devices.get(present.device_id.0).unwrap(); + let device = present.device.downcast_ref::().unwrap(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 3d3347b589..77dfd9dba5 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -1437,7 +1437,6 @@ impl crate::Context for Context { } #[cfg_attr(target_arch = "wasm32", allow(unused))] fn device_drop(&self, device: &Self::DeviceId, _device_data: &Self::DeviceData) { - #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))] { let global = &self.0; @@ -1447,7 +1446,6 @@ impl crate::Context for Context { } wgc::gfx_select!(device => global.device_drop(*device)); } - } #[cfg_attr(target_arch = "wasm32", allow(unused))] fn queue_drop(&self, queue: &Self::QueueId, _device_data: &Self::QueueData) { From f71b2dc6a86b6590de2f991227facb19c5da282c Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 19 Aug 2023 12:12:43 +0200 Subject: [PATCH 078/132] Introducing AnySurface --- wgpu-core/src/any_surface.rs | 112 ++++++++++++++ wgpu-core/src/command/mod.rs | 4 +- wgpu-core/src/device/any_device.rs | 4 +- wgpu-core/src/hal_api.rs | 10 +- wgpu-core/src/instance.rs | 234 +++++++++++++++++------------ wgpu-core/src/lib.rs | 3 +- 6 files changed, 257 insertions(+), 110 deletions(-) create mode 100644 wgpu-core/src/any_surface.rs diff --git a/wgpu-core/src/any_surface.rs b/wgpu-core/src/any_surface.rs new file mode 100644 index 0000000000..757e5c152b --- /dev/null +++ b/wgpu-core/src/any_surface.rs @@ -0,0 +1,112 @@ +use wgt::Backend; + +/// The `AnySurface` type: a `Arc` of a `HalSurface` for any backend `A`. +use crate::hal_api::HalApi; +use crate::instance::HalSurface; + +use std::any::Any; +use std::fmt; +use std::sync::Arc; + +/// A `Arc` of a `HalSurface`, for any backend `A`. +/// +/// Any `AnySurface` is just like an `Arc>`, except that the +/// `A` type parameter is erased. To access the `Surface`, you must +/// downcast to a particular backend with the \[`downcast_ref`\] or +/// \[`take`\] methods. +pub struct AnySurface(Arc); + +impl AnySurface { + /// Return an `AnySurface` that holds an owning `Arc` to `HalSurface`. 
+ pub fn new(surface: HalSurface) -> AnySurface { + AnySurface(Arc::new(surface)) + } + + pub fn backend(&self) -> Backend { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + if self.downcast_ref::().is_some() { + return Backend::Vulkan; + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + if self.downcast_ref::().is_some() { + return Backend::Metal; + } + #[cfg(all(feature = "dx12", windows))] + if self.downcast_ref::().is_some() { + return Backend::Dx12; + } + #[cfg(all(feature = "dx11", windows))] + if self.downcast_ref::().is_some() { + return Backend::Dx11; + } + #[cfg(feature = "gles")] + if self.downcast_ref::().is_some() { + return Backend::Gl; + } + Backend::Empty + } + + /// If `self` is an `Arc>`, return a reference to the + /// HalSurface. + pub fn downcast_ref(&self) -> Option<&HalSurface> { + self.0.downcast_ref::>() + } + + /// If `self` is an `Arc>`, returns that. + pub fn take(self) -> Option>> { + // `Arc::downcast` returns `Arc`, but requires that `T` be `Sync` and + // `Send`, and this is not the case for `HalSurface` in wasm builds. + // + // But as far as I can see, `Arc::downcast` has no particular reason to + // require that `T` be `Sync` and `Send`; the steps used here are sound. + if (self.0).is::>() { + // Turn the `Arc`, which is a pointer to an `ArcInner` struct, into + // a pointer to the `ArcInner`'s `data` field. Carry along the + // vtable from the original `Arc`. + let raw_erased: *const (dyn Any + 'static) = Arc::into_raw(self.0); + // Remove the vtable, and supply the concrete type of the `data`. + let raw_typed: *const HalSurface = raw_erased.cast::>(); + // Convert the pointer to the `data` field back into a pointer to + // the `ArcInner`, and restore reference-counting behavior. + let arc_typed: Arc> = unsafe { + // Safety: + // - We checked that the `dyn Any` was indeed a `HalSurface` above. + // - We're calling `Arc::from_raw` on the same pointer returned + // by `Arc::into_raw`, except that we stripped off the vtable + // pointer. + // - The pointer must still be live, because we've borrowed `self`, + // which holds another reference to it. + // - The format of a `ArcInner` must be the same as + // that of an `ArcInner>`, or else `AnyHalSurface::new` + // wouldn't be possible. 
+ Arc::from_raw(raw_typed) + }; + Some(arc_typed) + } else { + None + } + } +} + +impl fmt::Debug for AnySurface { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AnySurface") + } +} + +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Send for AnySurface {} +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Sync for AnySurface {} diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index b4847b6e6a..12c11093f8 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -52,7 +52,7 @@ pub(crate) enum CommandEncoderStatus { Error, } -pub(crate) struct CommandEncoder { +pub(crate) struct CommandEncoder { raw: A::CommandEncoder, list: Vec, is_open: bool, @@ -60,7 +60,7 @@ pub(crate) struct CommandEncoder { } //TODO: handle errors better -impl CommandEncoder { +impl CommandEncoder { /// Closes the live encoder fn close_and_swap(&mut self) { if self.is_open { diff --git a/wgpu-core/src/device/any_device.rs b/wgpu-core/src/device/any_device.rs index 8cbc851939..ab13b1421f 100644 --- a/wgpu-core/src/device/any_device.rs +++ b/wgpu-core/src/device/any_device.rs @@ -10,8 +10,8 @@ use std::sync::Arc; /// /// Any `AnyDevice` is just like an `Arc>`, except that the /// `A` type parameter is erased. To access the `Device`, you must -/// downcast to a particular backend with the [`downcast_ref`] or -/// [`downcast_clone`] methods. +/// downcast to a particular backend with the \[`downcast_ref`\] or +/// \[`downcast_clone`\] methods. pub struct AnyDevice(Arc); impl AnyDevice { diff --git a/wgpu-core/src/hal_api.rs b/wgpu-core/src/hal_api.rs index a95b58f070..df0acdac75 100644 --- a/wgpu-core/src/hal_api.rs +++ b/wgpu-core/src/hal_api.rs @@ -48,7 +48,7 @@ impl HalApi for hal::api::Vulkan { &global.hubs.vulkan } fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.vulkan.as_ref() + surface.raw.downcast_ref() } } @@ -69,7 +69,7 @@ impl HalApi for hal::api::Metal { &global.hubs.metal } fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.metal.as_ref() + surface.raw.downcast_ref() } } @@ -90,7 +90,7 @@ impl HalApi for hal::api::Dx12 { &global.hubs.dx12 } fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx12.as_ref() + surface.raw.downcast_ref() } } @@ -111,7 +111,7 @@ impl HalApi for hal::api::Dx11 { &global.hubs.dx11 } fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.dx11.as_ref() + surface.raw.downcast_ref() } } @@ -133,6 +133,6 @@ impl HalApi for hal::api::Gles { &global.hubs.gl } fn get_surface(surface: &Surface) -> Option<&HalSurface> { - surface.gl.as_ref() + surface.raw.downcast_ref() } } diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index c97d287f43..23f6b65bf7 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use crate::{ + any_surface::AnySurface, device::{queue::Queue, resource::Device, DeviceDescriptor}, global::Global, hal_api::HalApi, @@ -20,9 +21,9 @@ use thiserror::Error; pub type RequestAdapterOptions = wgt::RequestAdapterOptions; type HalInstance = ::Instance; //TODO: remove this -pub struct HalSurface { +#[derive(Clone)] +pub struct HalSurface { pub raw: Arc, - //pub acquired_texture: Option, } #[derive(Clone, Debug, Error)] @@ -109,47 +110,41 @@ impl Instance { } 
pub(crate) fn destroy_surface(&self, surface: Surface) { - fn destroy( - _: A, - instance: &Option, - surface: Option>, - ) { + fn destroy(_: A, instance: &Option, surface: AnySurface) { unsafe { - if let Some(suf) = surface { - if let Ok(raw) = Arc::try_unwrap(suf.raw) { - instance.as_ref().unwrap().destroy_surface(raw); + if let Some(surface) = surface.take::() { + if let Ok(suf) = Arc::try_unwrap(surface) { + if let Ok(raw) = Arc::try_unwrap(suf.raw) { + instance.as_ref().unwrap().destroy_surface(raw); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } } else { panic!("Surface cannot be destroyed because is still in use"); } } } } - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - destroy(hal::api::Vulkan, &self.vulkan, surface.vulkan); - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - destroy(hal::api::Metal, &self.metal, surface.metal); - #[cfg(all(feature = "dx12", windows))] - destroy(hal::api::Dx12, &self.dx12, surface.dx12); - #[cfg(all(feature = "dx11", windows))] - destroy(hal::api::Dx11, &self.dx11, surface.dx11); - #[cfg(feature = "gles")] - destroy(hal::api::Gles, &self.gl, surface.gl); + match surface.raw.backend() { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => destroy(hal::api::Vulkan, &self.vulkan, surface.raw), + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + Backend::Metal => destroy(hal::api::Metal, &self.metal, surface.raw), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => destroy(hal::api::Dx12, &self.dx12, surface.raw), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => destroy(hal::api::Dx11, &self.dx11, surface.raw), + #[cfg(feature = "gles")] + Backend::Gl => destroy(hal::api::Gles, &self.gl, surface.raw), + _ => unreachable!(), + } } } pub struct Surface { pub(crate) presentation: Mutex>, pub(crate) info: ResourceInfo, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub vulkan: Option>, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub metal: Option>, - #[cfg(all(feature = "dx12", windows))] - pub dx12: Option>, - #[cfg(all(feature = "dx11", windows))] - pub dx11: Option>, - #[cfg(feature = "gles")] - pub gl: Option>, + pub(crate) raw: AnySurface, } impl Resource for Surface { @@ -187,7 +182,7 @@ impl Surface { } } -pub struct Adapter { +pub struct Adapter { pub(crate) raw: hal::ExposedAdapter, pub(crate) info: ResourceInfo, } @@ -376,7 +371,7 @@ impl Adapter { } } -impl Resource for Adapter { +impl Resource for Adapter { const TYPE: &'static str = "Adapter"; fn as_info(&self) -> &ResourceInfo { @@ -471,17 +466,14 @@ impl Global { ) -> SurfaceId { profiling::scope!("Instance::create_surface"); - fn init( + fn init( inst: &Option, display_handle: raw_window_handle::RawDisplayHandle, window_handle: raw_window_handle::RawWindowHandle, ) -> Option> { inst.as_ref().and_then(|inst| unsafe { match inst.create_surface(display_handle, window_handle) { - Ok(raw) => Some(HalSurface { - raw: Arc::new(raw), - //acquired_texture: None, - }), + Ok(raw) => Some(HalSurface { raw: Arc::new(raw) }), Err(e) => { log::warn!("Error: {:?}", e); None @@ -490,19 +482,41 @@ impl Global { }) } + let mut hal_surface = None; + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + if let Some(raw) = + init::(&self.instance.vulkan, display_handle, window_handle) + { + hal_surface = Some(AnySurface::new(raw)); + } + #[cfg(all(feature = "metal", any(target_os = "macos", 
target_os = "ios")))] + if let Some(raw) = + init::(&self.instance.metal, display_handle, window_handle) + { + hal_surface = Some(AnySurface::new(raw)); + } + #[cfg(all(feature = "dx12", windows))] + if let Some(raw) = + init::(&self.instance.dx12, display_handle, window_handle) + { + hal_surface = Some(AnySurface::new(raw)); + } + #[cfg(all(feature = "dx11", windows))] + if let Some(raw) = + init::(&self.instance.dx11, display_handle, window_handle) + { + hal_surface = Some(AnySurface::new(raw)); + } + #[cfg(feature = "gles")] + if let Some(raw) = init::(&self.instance.gl, display_handle, window_handle) + { + hal_surface = Some(AnySurface::new(raw)); + } + let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: init::(&self.instance.vulkan, display_handle, window_handle), - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: init::(&self.instance.metal, display_handle, window_handle), - #[cfg(all(feature = "dx12", windows))] - dx12: init::(&self.instance.dx12, display_handle, window_handle), - #[cfg(all(feature = "dx11", windows))] - dx11: init::(&self.instance.dx11, display_handle, window_handle), - #[cfg(feature = "gles")] - gl: init::(&self.instance.gl, display_handle, window_handle), + raw: hal_surface.unwrap(), }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ -523,17 +537,21 @@ impl Global { let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - metal: self.instance.metal.as_ref().map(|inst| HalSurface { - raw: Arc::new( - // we don't want to link to metal-rs for this - #[allow(clippy::transmute_ptr_to_ref)] - inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }), - ), //acquired_texture: None, - }), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - #[cfg(feature = "gles")] - gl: None, + raw: { + let hal_surface: HalSurface = self + .instance + .metal + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new( + // we don't want to link to metal-rs for this + #[allow(clippy::transmute_ptr_to_ref)] + inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }), + ), //acquired_texture: None, + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ -555,16 +573,19 @@ impl Global { let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - gl: self - .instance - .gl - .as_ref() - .map(|inst| { - Ok(HalSurface { - raw: Arc::new(inst.create_surface_from_canvas(canvas)?), + raw: { + let hal_surface: HalSurface = self + .instance + .gl + .as_ref() + .map(|inst| { + Ok(HalSurface { + raw: Arc::new(inst.create_surface_from_canvas(canvas)?), + }) }) - }) - .transpose()?, + .transpose()?; + AnySurface::new(hal_surface) + }, }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ -586,16 +607,19 @@ impl Global { let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - gl: self - .instance - .gl - .as_ref() - .map(|inst| { - Ok(HalSurface { - raw: Arc::new(inst.create_surface_from_offscreen_canvas(canvas)?), + raw: { + let hal_surface: HalSurface = self + .instance + .gl + .as_ref() + .map(|inst| { + Ok(HalSurface { + raw: Arc::new(inst.create_surface_from_offscreen_canvas(canvas)?), + }) }) - }) - .transpose()?, + .transpose()?; + AnySurface::new(hal_surface) + }, }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ 
-616,14 +640,17 @@ impl Global { let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: Arc::new(unsafe { inst.create_surface_from_visual(visual as _) }), - }), - dx11: None, - #[cfg(feature = "gles")] - gl: None, + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new(unsafe { inst.create_surface_from_visual(visual as _) }), + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ -644,14 +671,19 @@ impl Global { let surface = Surface { presentation: Mutex::new(None), info: ResourceInfo::new(""), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: Arc::new(unsafe { inst.create_surface_from_surface_handle(surface_handle) }), - }), - dx11: None, - #[cfg(feature = "gles")] - gl: None, + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new(unsafe { + inst.create_surface_from_surface_handle(surface_handle) + }), + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); @@ -663,12 +695,14 @@ impl Global { fn unconfigure( global: &Global, - surface: &HalSurface, + surface: &AnySurface, present: &Presentation, ) { let hub = HalApi::hub(global); - if let Some(device) = present.device.downcast_ref::() { - hub.surface_unconfigure(device, surface); + if let Some(hal_surface) = surface.downcast_ref::() { + if let Some(device) = present.device.downcast_ref::() { + hub.surface_unconfigure(device, hal_surface); + } } } @@ -676,15 +710,15 @@ impl Global { if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { if let Some(present) = surface.presentation.lock().take() { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - unconfigure(self, surface.vulkan.as_ref().unwrap(), &present); + unconfigure::<_, hal::api::Vulkan>(self, &surface.raw, &present); #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - unconfigure(self, surface.metal.as_ref().unwrap(), &present); + unconfigure::<_, hal::api::Metal>(self, &surface.raw, &present); #[cfg(all(feature = "dx12", windows))] - unconfigure(self, surface.dx12.as_ref().unwrap(), &present); + unconfigure::<_, hal::api::Dx12>(self, &surface.raw, &present); #[cfg(all(feature = "dx11", windows))] - unconfigure(self, surface.dx11.as_ref().unwrap(), &present); + unconfigure::<_, hal::api::Dx11>(self, &surface.raw, &present); #[cfg(feature = "gles")] - unconfigure(self, surface.gl.as_ref().unwrap(), &present); + unconfigure::<_, hal::api::Gles>(self, &surface.raw, &present); } self.instance.destroy_surface(surface); diff --git a/wgpu-core/src/lib.rs b/wgpu-core/src/lib.rs index 2d962330ce..e6eb2569b2 100644 --- a/wgpu-core/src/lib.rs +++ b/wgpu-core/src/lib.rs @@ -48,6 +48,7 @@ clippy::pattern_type_mismatch, )] +pub mod any_surface; pub mod binding_model; pub mod command; mod conv; @@ -232,7 +233,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// /// ```ignore /// impl<...> Global<...> { -/// pub fn device_create_buffer(&self, ...) -> ... +/// pub fn device_create_buffer(&self, ...) -> ... /// { ... 
} /// } /// ``` From 007a53da0def8c8bb15d8043b9997bf939af4592 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 19 Aug 2023 12:15:39 +0200 Subject: [PATCH 079/132] Fixing clippy --- wgpu-core/src/device/any_device.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgpu-core/src/device/any_device.rs b/wgpu-core/src/device/any_device.rs index 8cbc851939..ab13b1421f 100644 --- a/wgpu-core/src/device/any_device.rs +++ b/wgpu-core/src/device/any_device.rs @@ -10,8 +10,8 @@ use std::sync::Arc; /// /// Any `AnyDevice` is just like an `Arc>`, except that the /// `A` type parameter is erased. To access the `Device`, you must -/// downcast to a particular backend with the [`downcast_ref`] or -/// [`downcast_clone`] methods. +/// downcast to a particular backend with the \[`downcast_ref`\] or +/// \[`downcast_clone`\] methods. pub struct AnyDevice(Arc); impl AnyDevice { From c108c9afd6ec06045853510394455263425f6194 Mon Sep 17 00:00:00 2001 From: Jim Blandy Date: Sat, 19 Aug 2023 10:26:44 -0700 Subject: [PATCH 080/132] When clearing a Hub, remember to remove the surface's presentation. --- wgpu-core/src/hub.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index f5b2dc20a5..568c395ffe 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -291,7 +291,7 @@ impl Hub { for element in surface_guard.map.iter() { if let Element::Occupied(ref surface, _epoch) = *element { - if let Some(ref mut present) = *surface.presentation.lock() { + if let Some(ref mut present) = surface.presentation.lock().take() { if let Some(device) = present.device.downcast_ref::() { let suf = A::get_surface(surface); unsafe { From e65bdd200a1b327f67d56b7c6fa5defe0968a693 Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 25 Aug 2023 17:27:43 +0200 Subject: [PATCH 081/132] Improving error msg --- wgpu-core/src/storage.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index a328c4f73f..ca7819373f 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -111,8 +111,8 @@ where }; assert_eq!( epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index + "{}[{:?}] is no longer alive", + self.kind, id ); result } @@ -123,14 +123,14 @@ where let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { Some(&Element::Occupied(ref v, epoch)) => (Ok(v.is_unique()), epoch), - Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Vacant) => panic!("{}[{id}] does not exist", self.kind, id), Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), None => return Err(InvalidId), }; assert_eq!( epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index + "{}[{:?}] is no longer alive", + self.kind, id ); result } @@ -141,14 +141,14 @@ where let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), None => return Err(InvalidId), }; assert_eq!( epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index + "{}[{:?}] is no longer alive", + self.kind, id ); result } From 38eb9ac667765a4d0c65dad67eae7e0d24f041e6 Mon Sep 17 
00:00:00 2001
From: gents83
Date: Fri, 25 Aug 2023 17:29:50 +0200
Subject: [PATCH 082/132] TypedId should be debuggable

---
 wgpu-core/src/id.rs      | 2 +-
 wgpu-core/src/storage.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs
index 6aa2aa2872..07beb89499 100644
--- a/wgpu-core/src/id.rs
+++ b/wgpu-core/src/id.rs
@@ -171,7 +171,7 @@ pub(crate) struct Valid(pub I);
 /// Most `wgpu-core` clients should not use this trait. Unusual clients that
 /// need to construct `Id` values directly, or access their components, like the
 /// WGPU recording player, may use this trait to do so.
-pub trait TypedId: Copy {
+pub trait TypedId: Copy + std::fmt::Debug {
     fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
     fn unzip(self) -> (Index, Epoch, Backend);
     fn into_raw(self) -> NonZeroId;
diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs
index ca7819373f..af82909586 100644
--- a/wgpu-core/src/storage.rs
+++ b/wgpu-core/src/storage.rs
@@ -123,7 +123,7 @@ where
         let (index, epoch, _) = id.unzip();
         let (result, storage_epoch) = match self.map.get(index as usize) {
             Some(&Element::Occupied(ref v, epoch)) => (Ok(v.is_unique()), epoch),
-            Some(&Element::Vacant) => panic!("{}[{id}] does not exist", self.kind, id),
+            Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id),
             Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch),
             None => return Err(InvalidId),
         };
From f974bebb7d50457f7629e28ff47de9666d162990 Mon Sep 17 00:00:00 2001
From: gents83
Date: Fri, 25 Aug 2023 17:49:35 +0200
Subject: [PATCH 083/132] Improving log

---
 wgpu-core/src/track/buffer.rs    | 9 +++------
 wgpu-core/src/track/stateless.rs | 5 +++--
 wgpu-core/src/track/texture.rs   | 9 +++------
 3 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs
index 96e3cbb768..38b527a25f 100644
--- a/wgpu-core/src/track/buffer.rs
+++ b/wgpu-core/src/track/buffer.rs
@@ -11,7 +11,7 @@ use super::PendingTransition;
 use crate::{
     hal_api::HalApi,
     id::{BufferId, TypedId, Valid},
-    resource::{Buffer, Resource},
+    resource::Buffer,
     storage::Storage,
     track::{
         invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
@@ -556,11 +556,8 @@ impl BufferTracker {
                     return true;
                 } else {
                     log::info!(
-                        "{:?} is still referenced from {}",
-                        self.metadata
-                            .get_resource_unchecked(index)
-                            .as_info()
-                            .label(),
+                        "Buffer {:?} is still referenced from {}",
+                        id,
                         existing_ref_count
                     );
                 }
diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs
index 61f23faf9f..87a19faa67 100644
--- a/wgpu-core/src/track/stateless.rs
+++ b/wgpu-core/src/track/stateless.rs
@@ -188,8 +188,9 @@ impl> StatelessTracker {
                     return true;
                 } else {
                     log::info!(
-                        "{:?} is still referenced from {}",
-                        self.metadata.get_resource_unchecked(index).label(),
+                        "{} {:?} is still referenced from {}",
+                        T::TYPE,
+                        id,
                         existing_ref_count
                     );
                 }
diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs
index ebff03c9e0..013b9dfb56 100644
--- a/wgpu-core/src/track/texture.rs
+++ b/wgpu-core/src/track/texture.rs
@@ -23,7 +23,7 @@ use super::{range::RangedStates, PendingTransition};
 use crate::{
     hal_api::HalApi,
     id::{TextureId, TypedId, Valid},
-    resource::{Resource, Texture},
+    resource::Texture,
     storage::Storage,
     track::{
         invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
@@ -722,11 +722,8 @@ impl TextureTracker {
                     return true;
                 } else {
                     log::info!(
-                        "{:?} is still
referenced from {}", - self.metadata - .get_resource_unchecked(index) - .as_info() - .label(), + "Texture {:?} is still referenced from {}", + id, existing_ref_count ); } From 579edb6da6f55be6fbe9b43c86b1cc4f76717610 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 16:47:21 +0200 Subject: [PATCH 084/132] Fix integration issues --- wgpu-core/src/binding_model.rs | 18 ++-- wgpu-core/src/command/bind.rs | 165 +++++++++++++------------------ wgpu-core/src/command/bundle.rs | 115 +++++++++------------ wgpu-core/src/command/compute.rs | 54 +++++----- wgpu-core/src/command/render.rs | 66 ++++++------- wgpu-core/src/device/global.rs | 62 +++++------- wgpu-core/src/device/life.rs | 54 +++++----- wgpu-core/src/device/resource.rs | 91 +++++++++-------- wgpu-core/src/instance.rs | 20 ++-- wgpu-core/src/pipeline.rs | 8 +- wgpu-core/src/resource.rs | 3 + wgpu-core/src/track/stateless.rs | 2 +- 12 files changed, 298 insertions(+), 360 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index d9b10028cc..64a909291d 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -459,7 +459,7 @@ pub struct BindGroupLayout { // bind group layout id with its compatible sibling. // Since this substitution can come at a cost, it is skipped when wgpu-core generates // its own resource IDs. - pub(crate) compatible_layout: Option>, + pub(crate) compatible_layout: Option>>, #[allow(unused)] pub(crate) dynamic_count: usize, pub(crate) count_validator: BindingTypeMaxCountValidator, @@ -509,23 +509,23 @@ impl BindGroupLayout { pub(crate) fn try_get_bind_group_layout( layouts: &BindGroupLayouts, id: BindGroupLayoutId, -) -> Option<&BindGroupLayout> { +) -> Option<&Arc>> { let layout = layouts.get(id).ok()?; - if let Some(compat) = layout.compatible_layout { - return Some(&layouts[compat]); + if let Some(compat) = layout.compatible_layout.as_ref() { + return Some(compat); } - Some(layout) } pub(crate) fn get_bind_group_layout( layouts: &BindGroupLayouts, id: Valid, -) -> (Valid, &BindGroupLayout) { +) -> (Valid, &Arc>) { let layout = &layouts[id]; layout .compatible_layout - .map(|compat| (compat, &layouts[compat])) + .as_ref() + .map(|compat| (compat.as_info().id(), compat)) .unwrap_or((id, layout)) } @@ -630,7 +630,7 @@ pub struct PipelineLayout { pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) info: ResourceInfo, - pub(crate) bind_group_layout_ids: ArrayVec, { hal::MAX_BIND_GROUPS }>, + pub(crate) bind_group_layouts: ArrayVec>, { hal::MAX_BIND_GROUPS }>, pub(crate) push_constant_ranges: ArrayVec, } @@ -844,7 +844,7 @@ pub(crate) fn buffer_binding_type_alignment( pub struct BindGroup { pub(crate) raw: Option, pub(crate) device: Arc>, - pub(crate) layout_id: Valid, + pub(crate) layout: Arc>, pub(crate) info: ResourceInfo, pub(crate) used: BindGroupStates, pub(crate) used_buffer_ranges: Vec, diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index be6aa69629..862b6efe4e 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -1,12 +1,11 @@ +use std::sync::Arc; + use crate::{ - binding_model::{ - BindGroup, BindGroupLayouts, LateMinBufferBindingSizeMismatch, PipelineLayout, - }, + binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout}, device::SHADER_STAGE_COUNT, hal_api::HalApi, - id::{BindGroupId, PipelineLayoutId, Valid}, + id::{BindGroupId, Valid}, pipeline::LateSizedBufferGroup, - storage::Storage, }; use arrayvec::ArrayVec; @@ -14,73 +13,91 @@ use 
arrayvec::ArrayVec; type BindGroupMask = u8; mod compat { - use crate::{ - binding_model::BindGroupLayouts, - id::{BindGroupLayoutId, Valid}, - }; - use std::ops::Range; + use arrayvec::ArrayVec; - #[derive(Debug, Default)] - struct Entry { - assigned: Option>, - expected: Option>, + use crate::{binding_model::BindGroupLayout, hal_api::HalApi, resource::Resource}; + use std::{ops::Range, sync::Arc}; + + #[derive(Debug, Clone)] + struct Entry { + assigned: Option>>, + expected: Option>>, } - impl Entry { + impl Entry { + fn empty() -> Self { + Self { + assigned: None, + expected: None, + } + } fn is_active(&self) -> bool { self.assigned.is_some() && self.expected.is_some() } - fn is_valid(&self, bind_group_layouts: &BindGroupLayouts) -> bool { - if self.expected.is_none() || self.expected == self.assigned { - return true; + fn is_valid(&self) -> bool { + if let Some(expected_bgl) = self.expected.as_ref() { + if let Some(assigned_bgl) = self.assigned.as_ref() { + if expected_bgl.is_equal(assigned_bgl) { + return true; + } + if let Some(compatible_bgl) = assigned_bgl.compatible_layout.as_ref() { + return compatible_bgl.is_equal(expected_bgl); + } + } + return false; } + true + } - if let Some(id) = self.assigned { - return bind_group_layouts[id].compatible_layout == self.expected; + fn is_incompatible(&self) -> bool { + if let Some(expected_bgl) = self.expected.as_ref() { + if let Some(assigned_bgl) = self.assigned.as_ref() { + return !assigned_bgl.is_equal(expected_bgl); + } } - - false + true } } - #[derive(Debug)] - pub(crate) struct BoundBindGroupLayouts { - entries: [Entry; hal::MAX_BIND_GROUPS], + #[derive(Debug, Default)] + pub(crate) struct BoundBindGroupLayouts { + entries: ArrayVec, { hal::MAX_BIND_GROUPS }>, } - impl BoundBindGroupLayouts { + impl BoundBindGroupLayouts { pub fn new() -> Self { Self { - entries: Default::default(), + entries: (0..hal::MAX_BIND_GROUPS).map(|_| Entry::empty()).collect(), } } - fn make_range(&self, start_index: usize) -> Range { // find first incompatible entry let end = self .entries .iter() - .position(|e| e.expected.is_none() || e.assigned != e.expected) + .position(|e| e.is_incompatible()) .unwrap_or(self.entries.len()); start_index..end.max(start_index) } pub fn update_expectations( &mut self, - expectations: &[Valid], + expectations: &[Arc>], ) -> Range { let start_index = self .entries .iter() .zip(expectations) - .position(|(e, &expect)| e.expected != Some(expect)) + .position(|(e, expect)| { + e.expected.is_none() || !e.expected.as_ref().unwrap().is_equal(expect) + }) .unwrap_or(expectations.len()); - for (e, &expect) in self.entries[start_index..] + for (e, expect) in self.entries[start_index..] 
.iter_mut() .zip(expectations[start_index..].iter()) { - e.expected = Some(expect); + e.expected = Some(expect.clone()); } for e in self.entries[expectations.len()..].iter_mut() { e.expected = None; @@ -88,7 +105,7 @@ mod compat { self.make_range(start_index) } - pub fn assign(&mut self, index: usize, value: Valid) -> Range { + pub fn assign(&mut self, index: usize, value: Arc>) -> Range { self.entries[index].assigned = Some(value); self.make_range(index) } @@ -100,12 +117,9 @@ mod compat { .filter_map(|(i, e)| if e.is_active() { Some(i) } else { None }) } - pub fn invalid_mask( - &self, - bind_group_layouts: &BindGroupLayouts, - ) -> super::BindGroupMask { + pub fn invalid_mask(&self) -> super::BindGroupMask { self.entries.iter().enumerate().fold(0, |mask, (i, entry)| { - if entry.is_valid(bind_group_layouts) { + if entry.is_valid() { mask } else { mask | 1u8 << i @@ -113,40 +127,6 @@ mod compat { }) } } - - #[test] - fn test_compatibility() { - fn id(val: u32) -> Valid { - BindGroupLayoutId::dummy(val) - } - - let mut man = BoundBindGroupLayouts::new(); - man.entries[0] = Entry { - expected: Some(id(3)), - assigned: Some(id(2)), - }; - man.entries[1] = Entry { - expected: Some(id(1)), - assigned: Some(id(1)), - }; - man.entries[2] = Entry { - expected: Some(id(4)), - assigned: Some(id(5)), - }; - // check that we rebind [1] after [0] became compatible - assert_eq!(man.assign(0, id(3)), 0..2); - // check that nothing is rebound - assert_eq!(man.update_expectations(&[id(3), id(2)]), 1..1); - // check that [1] and [2] are rebound on expectations change - assert_eq!(man.update_expectations(&[id(3), id(1), id(5)]), 1..3); - // reset the first two bindings - assert_eq!(man.update_expectations(&[id(4), id(6), id(5)]), 0..0); - // check that nothing is rebound, even if there is a match, - // since earlier binding is incompatible. - assert_eq!(man.assign(1, id(6)), 1..1); - // finally, bind everything - assert_eq!(man.assign(0, id(4)), 0..3); - } } #[derive(Debug)] @@ -174,40 +154,37 @@ impl EntryPayload { } } -#[derive(Debug)] -pub(super) struct Binder { - pub(super) pipeline_layout_id: Option>, //TODO: strongly `Stored` - manager: compat::BoundBindGroupLayouts, +#[derive(Debug, Default)] +pub(super) struct Binder { + pub(super) pipeline_layout: Option>>, + manager: compat::BoundBindGroupLayouts, payloads: [EntryPayload; hal::MAX_BIND_GROUPS], } -impl Binder { +impl Binder { pub(super) fn new() -> Self { Self { - pipeline_layout_id: None, + pipeline_layout: None, manager: compat::BoundBindGroupLayouts::new(), payloads: Default::default(), } } - pub(super) fn reset(&mut self) { - self.pipeline_layout_id = None; + self.pipeline_layout = None; self.manager = compat::BoundBindGroupLayouts::new(); for payload in self.payloads.iter_mut() { payload.reset(); } } - pub(super) fn change_pipeline_layout<'a, A: HalApi>( + pub(super) fn change_pipeline_layout<'a>( &'a mut self, - guard: &Storage, PipelineLayoutId>, - new_id: Valid, + new: &Arc>, late_sized_buffer_groups: &[LateSizedBufferGroup], ) -> (usize, &'a [EntryPayload]) { - let old_id_opt = self.pipeline_layout_id.replace(new_id); - let new = &guard[new_id]; + let old_id_opt = self.pipeline_layout.replace(new.clone()); - let mut bind_range = self.manager.update_expectations(&new.bind_group_layout_ids); + let mut bind_range = self.manager.update_expectations(&new.bind_group_layouts); // Update the buffer binding sizes that are required by shaders. 
for (payload, late_group) in self.payloads.iter_mut().zip(late_sized_buffer_groups) { @@ -231,8 +208,7 @@ impl Binder { } } - if let Some(old_id) = old_id_opt { - let old = &guard[old_id]; + if let Some(old) = old_id_opt { // root constants are the base compatibility property if old.push_constant_ranges != new.push_constant_ranges { bind_range.start = 0; @@ -242,7 +218,7 @@ impl Binder { (bind_range.start, &self.payloads[bind_range]) } - pub(super) fn assign_group<'a, A: HalApi>( + pub(super) fn assign_group<'a>( &'a mut self, index: usize, bind_group_id: Valid, @@ -277,7 +253,7 @@ impl Binder { } } - let bind_range = self.manager.assign(index, bind_group.layout_id); + let bind_range = self.manager.assign(index, bind_group.layout.clone()); &self.payloads[bind_range] } @@ -288,11 +264,8 @@ impl Binder { .map(move |index| *payloads[index].group_id.as_ref().unwrap()) } - pub(super) fn invalid_mask( - &self, - bind_group_layouts: &BindGroupLayouts, - ) -> BindGroupMask { - self.manager.invalid_mask(bind_group_layouts) + pub(super) fn invalid_mask(&self) -> BindGroupMask { + self.manager.invalid_mask() } /// Scan active buffer bindings corresponding to layouts without `min_binding_size` specified. diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 14aed743e0..f7a7a39a72 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -79,7 +79,7 @@ index format changes. #![allow(clippy::reversed_empty_ranges)] use crate::{ - binding_model::{self, buffer_binding_type_alignment}, + binding_model::{buffer_binding_type_alignment, BindGroup, BindGroupLayout, PipelineLayout}, command::{ BasePass, BindGroupStateChange, ColorAttachmentError, DrawError, MapPassErr, PassErrorScope, RenderCommand, RenderCommandError, StateChange, @@ -95,7 +95,7 @@ use crate::{ id::{self, RenderBundleId}, identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, - pipeline::{self, PipelineFlags}, + pipeline::{self, PipelineFlags, RenderPipeline}, resource, resource::{Resource, ResourceInfo}, storage::Storage, @@ -259,7 +259,6 @@ impl RenderBundleEncoder { device: &Arc>, hub: &Hub, ) -> Result, RenderBundleError> { - let pipeline_layout_guard = hub.pipeline_layouts.read(); let bind_group_guard = hub.bind_groups.read(); let pipeline_guard = hub.render_pipelines.read(); let query_set_guard = hub.query_sets.read(); @@ -296,7 +295,7 @@ impl RenderBundleEncoder { } => { let scope = PassErrorScope::SetBindGroup(bind_group_id); - let bind_group: &binding_model::BindGroup = state + let bind_group = state .trackers .bind_groups .add_single(&*bind_group_guard, bind_group_id) @@ -348,7 +347,7 @@ impl RenderBundleEncoder { buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges); texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges); - state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets_range); + state.set_bind_group(index, bind_group_guard.get(bind_group_id).as_ref().unwrap(), &bind_group.layout, offsets_range); unsafe { state .trackers @@ -361,7 +360,7 @@ impl RenderBundleEncoder { RenderCommand::SetPipeline(pipeline_id) => { let scope = PassErrorScope::SetPipelineRender(pipeline_id); - let pipeline: &pipeline::RenderPipeline = state + let pipeline = state .trackers .render_pipelines .add_single(&*pipeline_guard, pipeline_id) @@ -384,8 +383,7 @@ impl RenderBundleEncoder { .map_pass_err(scope); } - let layout = 
&pipeline_layout_guard[pipeline.layout_id]; - let pipeline_state = PipelineState::new(pipeline_id, pipeline, layout); + let pipeline_state = PipelineState::new(pipeline); commands.push(command); @@ -394,7 +392,7 @@ impl RenderBundleEncoder { commands.extend(iter) } - state.invalidate_bind_groups(&pipeline_state, layout); + state.invalidate_bind_groups(&pipeline_state, &pipeline.layout); state.pipeline = Some(pipeline_state); } RenderCommand::SetIndexBuffer { @@ -462,10 +460,9 @@ impl RenderBundleEncoder { let scope = PassErrorScope::SetPushConstant; let end_offset = offset + size_bytes; - let pipeline = state.pipeline(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; + let pipeline_state = state.pipeline(scope)?; - pipeline_layout + pipeline_state.pipeline.layout .validate_push_constant_ranges(stages, offset, end_offset) .map_pass_err(scope)?; @@ -777,16 +774,12 @@ impl RenderBundle { pub(super) unsafe fn execute( &self, raw: &mut A::CommandEncoder, - pipeline_layout_guard: &Storage< - crate::binding_model::PipelineLayout, - id::PipelineLayoutId, - >, - bind_group_guard: &Storage, id::BindGroupId>, - pipeline_guard: &Storage, id::RenderPipelineId>, + bind_group_guard: &Storage, id::BindGroupId>, + pipeline_guard: &Storage, id::RenderPipelineId>, buffer_guard: &Storage, id::BufferId>, ) -> Result<(), ExecutionError> { let mut offsets = self.base.dynamic_offsets.as_slice(); - let mut pipeline_layout_id = None::>; + let mut pipeline_layout = None::>>; if let Some(ref label) = self.base.label { unsafe { raw.begin_debug_marker(label) }; } @@ -801,7 +794,7 @@ impl RenderBundle { let bind_group = bind_group_guard.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( - pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(), + pipeline_layout.as_ref().unwrap().raw(), index, bind_group.raw(), &offsets[..num_dynamic_offsets as usize], @@ -813,7 +806,7 @@ impl RenderBundle { let pipeline = pipeline_guard.get(pipeline_id).unwrap(); unsafe { raw.set_render_pipeline(pipeline.raw()) }; - pipeline_layout_id = Some(pipeline.layout_id); + pipeline_layout = Some(pipeline.layout.clone()); } RenderCommand::SetIndexBuffer { buffer_id, @@ -859,8 +852,7 @@ impl RenderBundle { size_bytes, values_offset, } => { - let pipeline_layout_id = pipeline_layout_id.unwrap(); - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; + let pipeline_layout = pipeline_layout.as_ref().unwrap(); if let Some(values_offset) = values_offset { let values_end_offset = @@ -1077,12 +1069,12 @@ impl VertexState { /// A bind group that has been set at a particular index during render bundle encoding. #[derive(Debug)] -struct BindState { +struct BindState { /// The id of the bind group set at this index. - bind_group_id: id::BindGroupId, + bind_group: Arc>, /// The layout of `group`. - layout_id: id::Valid, + layout: Arc>, /// The range of dynamic offsets for this bind group, in the original /// command stream's `BassPass::dynamic_offsets` array. @@ -1106,12 +1098,9 @@ struct VertexLimitState { } /// The bundle's current pipeline, and some cached information needed for validation. -struct PipelineState { - /// The pipeline's id. - id: id::RenderPipelineId, - - /// The id of the pipeline's layout. - layout_id: id::Valid, +struct PipelineState { + /// The pipeline + pipeline: Arc>, /// How this pipeline's vertex shader traverses each vertex buffer, indexed /// by vertex buffer slot number. 
@@ -1125,18 +1114,18 @@ struct PipelineState { used_bind_groups: usize, } -impl PipelineState { - fn new( - pipeline_id: id::RenderPipelineId, - pipeline: &pipeline::RenderPipeline, - layout: &binding_model::PipelineLayout, - ) -> Self { +impl PipelineState { + fn new(pipeline: &Arc>) -> Self { Self { - id: pipeline_id, - layout_id: pipeline.layout_id, + pipeline: pipeline.clone(), steps: pipeline.vertex_steps.to_vec(), - push_constant_ranges: layout.push_constant_ranges.iter().cloned().collect(), - used_bind_groups: layout.bind_group_layout_ids.len(), + push_constant_ranges: pipeline + .layout + .push_constant_ranges + .iter() + .cloned() + .collect(), + used_bind_groups: pipeline.layout.bind_group_layouts.len(), } } @@ -1178,10 +1167,10 @@ struct State { trackers: RenderBundleScope, /// The currently set pipeline, if any. - pipeline: Option, + pipeline: Option>, /// The bind group set at each index, if any. - bind: ArrayVec, { hal::MAX_BIND_GROUPS }>, + bind: ArrayVec>, { hal::MAX_BIND_GROUPS }>, /// The state of each vertex buffer slot. vertex: ArrayVec, { hal::MAX_VERTEX_BUFFERS }>, @@ -1200,7 +1189,7 @@ struct State { } impl State { - fn vertex_limits(&self, pipeline: &PipelineState) -> VertexLimitState { + fn vertex_limits(&self, pipeline: &PipelineState) -> VertexLimitState { let mut vert_state = VertexLimitState { vertex_limit: u32::MAX, vertex_limit_slot: 0, @@ -1231,11 +1220,11 @@ impl State { /// Return the id of the current pipeline, if any. fn pipeline_id(&self) -> Option { - self.pipeline.as_ref().map(|p| p.id) + self.pipeline.as_ref().map(|p| p.pipeline.as_info().id().0) } /// Return the current pipeline state. Return an error if none is set. - fn pipeline(&self, scope: PassErrorScope) -> Result<&PipelineState, RenderBundleError> { + fn pipeline(&self, scope: PassErrorScope) -> Result<&PipelineState, RenderBundleError> { self.pipeline .as_ref() .ok_or(DrawError::MissingPipeline) @@ -1252,8 +1241,8 @@ impl State { fn set_bind_group( &mut self, slot: u32, - bind_group_id: id::BindGroupId, - layout_id: id::Valid, + bind_group: &Arc>, + layout: &Arc>, dynamic_offsets: Range, ) { // If this call wouldn't actually change this index's state, we can @@ -1261,7 +1250,7 @@ impl State { // be different.) if dynamic_offsets.is_empty() { if let Some(ref contents) = self.bind[slot as usize] { - if contents.bind_group_id == bind_group_id { + if contents.bind_group.is_equal(bind_group) { return; } } @@ -1269,8 +1258,8 @@ impl State { // Record the index's new state. self.bind[slot as usize] = Some(BindState { - bind_group_id, - layout_id, + bind_group: bind_group.clone(), + layout: layout.clone(), dynamic_offsets, is_dirty: true, }); @@ -1293,18 +1282,14 @@ impl State { /// /// - Changing the push constant ranges at all requires re-establishing /// all bind groups. - fn invalidate_bind_groups( - &mut self, - new: &PipelineState, - layout: &binding_model::PipelineLayout, - ) { + fn invalidate_bind_groups(&mut self, new: &PipelineState, layout: &PipelineLayout) { match self.pipeline { None => { // Establishing entirely new pipeline state. self.invalidate_bind_group_from(0); } Some(ref old) => { - if old.id == new.id { + if old.pipeline.is_equal(&new.pipeline) { // Everything is derived from the pipeline, so if the id has // not changed, there's no need to consider anything else. 
return; @@ -1314,14 +1299,12 @@ impl State { if old.push_constant_ranges != new.push_constant_ranges { self.invalidate_bind_group_from(0); } else { - let first_changed = self - .bind - .iter() - .zip(&layout.bind_group_layout_ids) - .position(|(entry, &layout_id)| match *entry { - Some(ref contents) => contents.layout_id != layout_id, + let first_changed = self.bind.iter().zip(&layout.bind_group_layouts).position( + |(entry, layout)| match *entry { + Some(ref contents) => !contents.layout.is_equal(layout), None => false, - }); + }, + ); if let Some(slot) = first_changed { self.invalidate_bind_group_from(slot); } @@ -1395,7 +1378,7 @@ impl State { let offsets = &contents.dynamic_offsets; return Some(RenderCommand::SetBindGroup { index: i.try_into().unwrap(), - bind_group_id: contents.bind_group_id, + bind_group_id: contents.bind_group.as_info().id().0, num_dynamic_offsets: (offsets.end - offsets.start) as u8, }); } diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index e15fe4c496..0dfbb29fb8 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -1,7 +1,7 @@ +use crate::resource::Resource; use crate::{ binding_model::{ - BindError, BindGroup, BindGroupLayouts, LateMinBufferBindingSizeMismatch, - PushConstantUploadError, + BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError, }, command::{ bind::Binder, @@ -276,15 +276,15 @@ where } struct State { - binder: Binder, + binder: Binder, pipeline: Option, scope: UsageScope, debug_scope_depth: u32, } impl State { - fn is_ready(&self, bind_group_layouts: &BindGroupLayouts) -> Result<(), DispatchError> { - let bind_mask = self.binder.invalid_mask(bind_group_layouts); + fn is_ready(&self) -> Result<(), DispatchError> { + let bind_mask = self.binder.invalid_mask(); if bind_mask != 0 { //let (expected, provided) = self.binder.entries[index as usize].info(); return Err(DispatchError::IncompatibleBindGroup { @@ -397,9 +397,7 @@ impl Global { let raw = encoder.open(); let device = &cmd_buf.device; - let pipeline_layout_guard = hub.pipeline_layouts.read(); let bind_group_guard = hub.bind_groups.read(); - let bind_group_layout_guard = hub.bind_group_layouts.read(); let pipeline_guard = hub.compute_pipelines.read(); let query_set_guard = hub.query_sets.read(); let buffer_guard = hub.buffers.read(); @@ -529,7 +527,8 @@ impl Global { ); } - let pipeline_layout_id = state.binder.pipeline_layout_id; + let pipeline_layout = &state.binder.pipeline_layout; + let pipeline_layout = pipeline_layout.as_ref().unwrap().clone(); let entries = state.binder.assign_group( index as usize, id::Valid(bind_group_id), @@ -537,8 +536,7 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = - pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(); + let pipeline_layout = pipeline_layout.raw(); for (i, e) in entries.iter().enumerate() { let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { @@ -568,12 +566,16 @@ impl Global { } // Rebind resources - if state.binder.pipeline_layout_id != Some(pipeline.layout_id) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; - + if state.binder.pipeline_layout.is_none() + || !state + .binder + .pipeline_layout + .as_ref() + .unwrap() + .is_equal(&pipeline.layout) + { let (start_index, entries) = state.binder.change_pipeline_layout( - &*pipeline_layout_guard, - pipeline.layout_id, + &pipeline.layout, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { @@ -581,7 +583,7 @@ 
impl Global { let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( - pipeline_layout.raw(), + pipeline.layout.raw(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -592,7 +594,7 @@ impl Global { // Clear push constant ranges let non_overlapping = super::bind::compute_nonoverlapping_ranges( - &pipeline_layout.push_constant_ranges, + &pipeline.layout.push_constant_ranges, ); for range in non_overlapping { let offset = range.range.start; @@ -602,7 +604,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - pipeline_layout.raw(), + pipeline.layout.raw(), wgt::ShaderStages::COMPUTE, clear_offset, clear_data, @@ -625,15 +627,15 @@ impl Global { let data_slice = &base.push_constant_data[(values_offset as usize)..values_end_offset]; - let pipeline_layout_id = state + let pipeline_layout = state .binder - .pipeline_layout_id + .pipeline_layout + .as_ref() //TODO: don't error here, lazily update the push constants .ok_or(ComputePassErrorInner::Dispatch( DispatchError::MissingPipeline, )) .map_pass_err(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; pipeline_layout .validate_push_constant_ranges( @@ -657,10 +659,8 @@ impl Global { indirect: false, pipeline: state.pipeline, }; - state - .is_ready(&*bind_group_layout_guard) - .map_pass_err(scope)?; - + state.is_ready().map_pass_err(scope)?; + state .flush_states( raw, @@ -697,9 +697,7 @@ impl Global { pipeline: state.pipeline, }; - state - .is_ready(&*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready().map_pass_err(scope)?; device .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index b2580b3ac5..6aad63d3f3 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1,5 +1,6 @@ +use crate::resource::Resource; use crate::{ - binding_model::{BindError, BindGroupLayouts}, + binding_model::BindError, command::{ self, bind::Binder, @@ -411,9 +412,9 @@ impl VertexState { } #[derive(Debug)] -struct State { +struct State { pipeline_flags: PipelineFlags, - binder: Binder, + binder: Binder, blend_constant: OptionalState, stencil_reference: u32, pipeline: Option, @@ -422,12 +423,8 @@ struct State { debug_scope_depth: u32, } -impl State { - fn is_ready( - &self, - indexed: bool, - bind_group_layouts: &BindGroupLayouts, - ) -> Result<(), DrawError> { +impl State { + fn is_ready(&self, indexed: bool) -> Result<(), DrawError> { // Determine how many vertex buffers have already been bound let vertex_buffer_count = self.vertex.inputs.iter().take_while(|v| v.bound).count() as u32; // Compare with the needed quantity @@ -437,7 +434,7 @@ impl State { }); } - let bind_mask = self.binder.invalid_mask(bind_group_layouts); + let bind_mask = self.binder.invalid_mask(); if bind_mask != 0 { //let (expected, provided) = self.binder.entries[index as usize].info(); return Err(DrawError::IncompatibleBindGroup { @@ -1344,7 +1341,6 @@ impl Global { encoder.open_pass(base.label); let bundle_guard = hub.render_bundles.read(); - let pipeline_layout_guard = hub.pipeline_layouts.read(); let bind_group_guard = hub.bind_groups.read(); let render_pipeline_guard = hub.render_pipelines.read(); let query_set_guard = hub.query_sets.read(); @@ -1462,7 +1458,7 @@ impl Global { ); } - let pipeline_layout_id = state.binder.pipeline_layout_id; + let pipeline_layout = state.binder.pipeline_layout.clone(); let entries = 
state.binder.assign_group( index as usize, id::Valid(bind_group_id), @@ -1470,8 +1466,7 @@ impl Global { &temp_offsets, ); if !entries.is_empty() { - let pipeline_layout = - pipeline_layout_guard[pipeline_layout_id.unwrap()].raw(); + let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { @@ -1529,12 +1524,16 @@ impl Global { } // Rebind resource - if state.binder.pipeline_layout_id != Some(pipeline.layout_id) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; - + if state.binder.pipeline_layout.is_none() + || !state + .binder + .pipeline_layout + .as_ref() + .unwrap() + .is_equal(&pipeline.layout) + { let (start_index, entries) = state.binder.change_pipeline_layout( - &*pipeline_layout_guard, - pipeline.layout_id, + &pipeline.layout, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { @@ -1543,7 +1542,7 @@ impl Global { bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { raw.set_bind_group( - pipeline_layout.raw(), + pipeline.layout.raw(), start_index as u32 + i as u32, raw_bg, &e.dynamic_offsets, @@ -1554,7 +1553,7 @@ impl Global { // Clear push constant ranges let non_overlapping = super::bind::compute_nonoverlapping_ranges( - &pipeline_layout.push_constant_ranges, + &pipeline.layout.push_constant_ranges, ); for range in non_overlapping { let offset = range.range.start; @@ -1564,7 +1563,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - pipeline_layout.raw(), + pipeline.layout.raw(), range.stages, clear_offset, clear_data, @@ -1770,12 +1769,12 @@ impl Global { let data_slice = &base.push_constant_data[(values_offset as usize)..values_end_offset]; - let pipeline_layout_id = state + let pipeline_layout = state .binder - .pipeline_layout_id + .pipeline_layout + .as_ref() .ok_or(DrawError::MissingPipeline) .map_pass_err(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; pipeline_layout .validate_push_constant_ranges(stages, offset, end_offset_bytes) @@ -1821,9 +1820,7 @@ impl Global { indirect: false, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let last_vertex = first_vertex + vertex_count; let vertex_limit = state.vertex.vertex_limit; @@ -1863,9 +1860,7 @@ impl Global { indirect: false, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; //TODO: validate that base_vertex + max_index() is // within the provided range @@ -1910,9 +1905,7 @@ impl Global { indirect: true, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let stride = match indexed { false => mem::size_of::(), @@ -1984,9 +1977,7 @@ impl Global { indirect: true, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let stride = match indexed { false => mem::size_of::(), @@ -2249,7 +2240,6 @@ impl Global { unsafe { bundle.execute( raw, - &*pipeline_layout_guard, &*bind_group_guard, &*render_pipeline_guard, &*buffer_guard, diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index b7345ba959..02b4327423 100644 --- 
a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -11,7 +11,7 @@ use crate::{ instance::{self, Adapter, Surface}, pipeline, present, resource::{self, Buffer, BufferAccessResult, BufferMapState}, - resource::{BufferAccessError, BufferMapOperation}, + resource::{BufferAccessError, BufferMapOperation, Resource}, validation::check_buffer_usage, FastHashMap, Label, LabelHelpers as _, }; @@ -1068,7 +1068,7 @@ impl Global { let mut compatible_layout = None; { let bgl_guard = hub.bind_group_layouts.read(); - if let Some(id) = + if let Some((id, layout)) = Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) { // If there is an equivalent BGL, just bump the refcount and return it. @@ -1082,7 +1082,7 @@ impl Global { return (id, None); } - compatible_layout = Some(id::Valid(id)); + compatible_layout = Some(layout.clone()); } } @@ -1095,7 +1095,7 @@ impl Global { let (id, _) = fid.assign(layout); log::info!("Created BindGroupLayout {:?}", id); - + return (id.0, None); }; @@ -1231,29 +1231,21 @@ impl Global { trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } - let mut bind_group_layout = match hub.bind_group_layouts.get(desc.layout) { + let bind_group_layout_guard = hub.bind_group_layouts.read(); + let mut bind_group_layout = match bind_group_layout_guard.get(desc.layout) { Ok(layout) => layout, Err(..) => break binding_model::CreateBindGroupError::InvalidLayout, }; - let mut layout_id = id::Valid(desc.layout); - if let Some(id) = bind_group_layout.compatible_layout { - layout_id = id; - bind_group_layout = &bind_group_layout_guard[id]; + if let Some(layout) = bind_group_layout.compatible_layout.as_ref() { + bind_group_layout = layout; } - let bind_group = match device.create_bind_group( - device_id, - bind_group_layout, - layout_id, - desc, - hub, - &mut token, - ) { + let bind_group = match device.create_bind_group(bind_group_layout, desc, hub) { Ok(bind_group) => bind_group, Err(e) => break e, }; - + let (id, resource) = fid.assign(bind_group); log::info!("Created BindGroup {:?}", id,); @@ -1724,18 +1716,14 @@ impl Global { Option, ) { let hub = A::hub(self); - let pipeline_layout_guard = hub.pipeline_layouts.read(); let error = loop { let pipeline = match hub.render_pipelines.get(pipeline_id) { Ok(pipeline) => pipeline, Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; - let id = match pipeline_layout_guard[pipeline.layout_id] - .bind_group_layout_ids - .get(index as usize) - { - Some(id) => id, + let id = match pipeline.layout.bind_group_layouts.get(index as usize) { + Some(id) => id.as_info().id(), None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; @@ -1761,10 +1749,10 @@ impl Global { ); let hub = A::hub(self); - let (pipeline, layout_id) = { + let (pipeline, layout) = { let mut pipeline_guard = hub.render_pipelines.write(); match pipeline_guard.get(render_pipeline_id) { - Ok(pipeline) => (pipeline.clone(), pipeline.layout_id), + Ok(pipeline) => (pipeline.clone(), pipeline.layout.clone()), Err(_) => { hub.render_pipelines .unregister_locked(render_pipeline_id, &mut *pipeline_guard); @@ -1772,17 +1760,18 @@ impl Global { } } }; + let layout_id = layout.as_info().id().0; let device = &pipeline.device; let mut life_lock = device.lock_life(); life_lock .suspected_resources .render_pipelines .insert(render_pipeline_id, pipeline.clone()); - let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); + life_lock .suspected_resources .pipeline_layouts - .insert(layout_id.0, layout); 
+ .insert(layout_id, layout); } pub fn device_create_compute_pipeline( @@ -1848,7 +1837,6 @@ impl Global { Option, ) { let hub = A::hub(self); - let pipeline_layout_guard = hub.pipeline_layouts.read(); let error = loop { let pipeline_guard = hub.compute_pipelines.read(); @@ -1857,15 +1845,13 @@ impl Global { Ok(pipeline) => pipeline, Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; - let id = match pipeline_layout_guard[pipeline.layout_id] - .bind_group_layout_ids - .get(index as usize) - { + + let layout = match pipeline.layout.bind_group_layouts.get(index as usize) { Some(id) => id, None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; - return (id.0, None); + return (layout.as_info().id().0, None); }; let id = hub @@ -1887,10 +1873,10 @@ impl Global { ); let hub = A::hub(self); - let (pipeline, layout_id) = { + let (pipeline, layout) = { let mut pipeline_guard = hub.compute_pipelines.write(); match pipeline_guard.get(compute_pipeline_id) { - Ok(pipeline) => (pipeline.clone(), pipeline.layout_id), + Ok(pipeline) => (pipeline.clone(), pipeline.layout.clone()), Err(_) => { hub.compute_pipelines .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); @@ -1904,11 +1890,11 @@ impl Global { .suspected_resources .compute_pipelines .insert(compute_pipeline_id, pipeline.clone()); - let layout = hub.pipeline_layouts.get(layout_id.0).unwrap(); + let layout_id = layout.as_info().id().0; life_lock .suspected_resources .pipeline_layouts - .insert(layout_id.0, layout); + .insert(layout_id, layout); } pub fn surface_configure( diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index e4a91937bc..78266a0ece 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -567,11 +567,9 @@ impl LifetimeTracker { .insert(v.as_info().id().0, v.clone()); } - let bind_group_layout = - hub.bind_group_layouts.get(res.layout_id.0).unwrap(); self.suspected_resources .bind_group_layouts - .insert(res.layout_id.0, bind_group_layout); + .insert(res.layout.as_info().id().0, res.layout.clone()); let submit_index = res.info.submission_index(); self.active @@ -848,11 +846,10 @@ impl LifetimeTracker { .pipeline_layouts .unregister_locked(id.0, &mut *pipeline_layouts_locked) { - for bgl_id in &lay.bind_group_layout_ids { - let bgl = hub.bind_group_layouts.get(bgl_id.0).unwrap(); + for bgl in &lay.bind_group_layouts { self.suspected_resources .bind_group_layouts - .insert(bgl_id.0, bgl); + .insert(bgl.as_info().id().0, bgl.clone()); } self.free_resources.pipeline_layouts.push(lay); } @@ -869,35 +866,36 @@ impl LifetimeTracker { F: FnMut(&id::BindGroupLayoutId), { let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); - - self.suspected_resources.bind_group_layouts.retain( + + self.suspected_resources.bind_group_layouts.retain( |bind_group_layout_id, bind_group_layout| { - let mut result = true; - let id = bind_group_layout.as_info().id(); + let id = bind_group_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. 
- let mut bgl_to_check = Some(id); - while let Some(id) = bgl_to_check.take() { - if bind_group_layouts_locked.is_unique(id.0).unwrap() { - result = false; - // If This layout points to a compatible one, go over the latter - // to decrement the ref count and potentially destroy it. - bgl_to_check = bgl.compatible_layout; - - log::debug!("BindGroupLayout {:?} will be removed from registry", id); - f(bind_group_layout_id); - - if let Some(lay) = hub - .bind_group_layouts - .unregister_locked(id.0, &mut *bind_group_layouts_locked) - { - self.free_resources.bind_group_layouts.push(lay); - } + + //Note: this has to happen after all the suspected pipelines are destroyed + if bind_group_layouts_locked.is_unique(id.0).unwrap() { + // If This layout points to a compatible one, go over the latter + // to decrement the ref count and potentially destroy it. + //bgl_to_check = bind_group_layout.compatible_layout; + + log::debug!( + "BindGroupLayout {:?} will be removed from registry", + bind_group_layout_id + ); + f(bind_group_layout_id); + + if let Some(lay) = hub + .bind_group_layouts + .unregister_locked(*bind_group_layout_id, &mut *bind_group_layouts_locked) + { + self.free_resources.bind_group_layouts.push(lay); } + return false; } - result + true }, ); self diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index f921dbdc8d..b01549d888 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1,7 +1,11 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{self, get_bind_group_layout, try_get_bind_group_layout}, command, conv, + binding_model::{ + self, get_bind_group_layout, try_get_bind_group_layout, BindGroupLayout, + BindGroupLayoutEntryError, + }, + command, conv, device::life::{LifetimeTracker, WaitIdleError}, device::queue::PendingWrites, device::{ @@ -1368,44 +1372,46 @@ impl Device { }) } - pub(crate) fn deduplicate_bind_group_layout( + pub(crate) fn deduplicate_bind_group_layout<'a>( self_id: DeviceId, - entry_map: &binding_model::BindEntryMap, - guard: &Storage, id::BindGroupLayoutId>, - ) -> Option { + entry_map: &'a binding_model::BindEntryMap, + guard: &'a Storage, id::BindGroupLayoutId>, + ) -> Option<(id::BindGroupLayoutId, &'a Arc>)> { guard .iter(self_id.backend()) - .find(|&(_, bgl)| bgl.device.info.id().0 == self_id && bgl.compatible_layout.is_none() && bgl.entries == *entry_map) - .map(|(id, _)| id) + .find(|&(_, bgl)| { + bgl.device.info.id().0 == self_id + && bgl.compatible_layout.is_none() + && bgl.entries == *entry_map + }) + .map(|(id, resource)| (id, resource)) } pub(crate) fn get_introspection_bind_group_layouts<'a>( - pipeline_layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, + pipeline_layout: &'a binding_model::PipelineLayout, ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { pipeline_layout - .bind_group_layout_ids + .bind_group_layouts .iter() - .map(|&id| &bgl_guard[id].entries) + .map(|layout| &layout.entries) .collect() } /// Generate information about late-validated buffer bindings for pipelines. //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? 
- pub(crate) fn make_late_sized_buffer_groups<'a>( + pub(crate) fn make_late_sized_buffer_groups( shader_binding_sizes: &FastHashMap, layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, ) -> ArrayVec { // Given the shader-required binding sizes and the pipeline layout, // return the filtered list of them in the layout order, // removing those with given `min_binding_size`. layout - .bind_group_layout_ids + .bind_group_layouts .iter() .enumerate() - .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { - shader_sizes: bgl_guard[bgl_id] + .map(|(group_index, bgl)| pipeline::LateSizedBufferGroup { + shader_sizes: bgl .entries .values() .filter_map(|entry| match entry.ty { @@ -1432,7 +1438,7 @@ impl Device { self: &Arc, label: Option<&str>, entry_map: binding_model::BindEntryMap, - ) -> Result, binding_model::CreateBindGroupLayoutError> { + ) -> Result, binding_model::CreateBindGroupLayoutError> { #[derive(PartialEq)] enum WritableStorage { Yes, @@ -1485,7 +1491,8 @@ impl Device { } => { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, + error: + BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, }); } Bt::Texture { .. } => ( @@ -1501,7 +1508,7 @@ impl Device { wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, + error: BindGroupLayoutEntryError::StorageTextureCube, }) } _ => (), @@ -1515,7 +1522,7 @@ impl Device { { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, + error: BindGroupLayoutEntryError::StorageTextureReadWrite, }); } _ => (), @@ -1545,7 +1552,7 @@ impl Device { // Validate the count parameter if entry.count.is_some() { required_features |= array_feature - .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) + .ok_or(BindGroupLayoutEntryError::ArrayUnsupported) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, @@ -1577,13 +1584,13 @@ impl Device { } self.require_features(required_features) - .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) + .map_err(BindGroupLayoutEntryError::MissingFeatures) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, })?; self.require_downlevel_flags(required_downlevel_flags) - .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) + .map_err(BindGroupLayoutEntryError::MissingDownlevelFlags) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, @@ -1617,9 +1624,13 @@ impl Device { .validate(&self.limits) .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; - Ok(binding_model::BindGroupLayout { + Ok(BindGroupLayout { raw: Some(raw), device: self.clone(), + dynamic_count: entry_map + .values() + .filter(|b| b.ty.has_dynamic_offset()) + .count(), count_validator, entries: entry_map, info: ResourceInfo::new(label.unwrap_or("")), @@ -1796,8 +1807,7 @@ impl Device { pub(crate) fn create_bind_group( self: &Arc, - layout: &binding_model::BindGroupLayout, - layout_id: id::Valid, + layout: &Arc>, desc: &binding_model::BindGroupDescriptor, hub: &Hub, ) -> Result, 
binding_model::CreateBindGroupError> { @@ -2031,7 +2041,7 @@ impl Device { Ok(binding_model::BindGroup { raw: Some(raw), device: self.clone(), - layout_id, + layout: layout.clone(), info: ResourceInfo::new(desc.label.borrow_or_default()), used, used_buffer_ranges, @@ -2212,7 +2222,7 @@ impl Device { pub(crate) fn create_pipeline_layout( self: &Arc, desc: &binding_model::PipelineLayoutDescriptor, - bgl_guard: &Storage, id::BindGroupLayoutId>, + bgl_guard: &Storage, id::BindGroupLayoutId>, ) -> Result, binding_model::CreatePipelineLayoutError> { use crate::binding_model::CreatePipelineLayoutError as Error; @@ -2279,7 +2289,7 @@ impl Device { let bgl_vec = desc .bind_group_layouts .iter() - .map(|&id| &try_get_bind_group_layout(bgl_guard, id).unwrap().raw()) + .map(|&id| try_get_bind_group_layout(bgl_guard, id).unwrap().raw()) .collect::>(); let hal_desc = hal::PipelineLayoutDescriptor { label: desc.label.borrow_option(), @@ -2300,14 +2310,12 @@ impl Device { raw: Some(raw), device: self.clone(), info: ResourceInfo::new(desc.label.borrow_or_default()), - bind_group_layout_ids: desc + bind_group_layouts: desc .bind_group_layouts .iter() .map(|&id| { - // manually add a dependency to BGL - let (id, layout) = get_bind_group_layout(bgl_guard, id::Valid(id)); - layout.multi_ref_count.inc(); - id + let (_, layout) = get_bind_group_layout(bgl_guard, id::Valid(id)); + layout.clone() }) .collect(), push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(), @@ -2320,7 +2328,7 @@ impl Device { self: &Arc, implicit_context: Option, mut derived_group_layouts: ArrayVec, - bgl_guard: &mut Storage, id::BindGroupLayoutId>, + bgl_guard: &mut Storage, id::BindGroupLayoutId>, pipeline_layout_guard: &mut Storage, id::PipelineLayoutId>, ) -> Result { while derived_group_layouts @@ -2342,7 +2350,7 @@ impl Device { for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { match Device::deduplicate_bind_group_layout(self.info.id().0, &map, bgl_guard) { - Some(dedup_id) => { + Some((dedup_id, _)) => { *bgl_id = dedup_id; } None => { @@ -2402,7 +2410,6 @@ impl Device { .get(pipeline_layout_id) .as_ref() .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?, - &*bgl_guard, )), None => { for _ in 0..self.limits.max_bind_groups { @@ -2438,7 +2445,7 @@ impl Device { .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout); let pipeline_desc = hal::ComputePipelineDescriptor { label: desc.label.borrow_option(), @@ -2469,7 +2476,7 @@ impl Device { let pipeline = pipeline::ComputePipeline { raw: Some(raw), - layout_id: id::Valid(pipeline_layout_id), + layout: layout.clone(), device: self.clone(), late_sized_buffer_groups, info: ResourceInfo::new(desc.label.borrow_or_default()), @@ -2762,7 +2769,6 @@ impl Device { .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; Some(Device::get_introspection_bind_group_layouts( pipeline_layout, - &*bgl_guard, )) } None => None, @@ -2810,7 +2816,6 @@ impl Device { .get(pipeline_layout_id) .as_ref() .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?, - &*bgl_guard, )), None => None, }; @@ -2914,7 +2919,7 @@ impl Device { } let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout); let 
pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.borrow_option(), @@ -2984,7 +2989,7 @@ impl Device { let pipeline = pipeline::RenderPipeline { raw: Some(raw), - layout_id: id::Valid(pipeline_layout_id), + layout: layout.clone(), device: self.clone(), pass_context, flags, diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index c97d287f43..356e7c9b1d 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -663,12 +663,14 @@ impl Global { fn unconfigure( global: &Global, - surface: &HalSurface, + surface: &Option>, present: &Presentation, ) { - let hub = HalApi::hub(global); - if let Some(device) = present.device.downcast_ref::() { - hub.surface_unconfigure(device, surface); + if let Some(surface) = surface.as_ref() { + let hub = HalApi::hub(global); + if let Some(device) = present.device.downcast_ref::() { + hub.surface_unconfigure(device, surface); + } } } @@ -676,15 +678,15 @@ impl Global { if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { if let Some(present) = surface.presentation.lock().take() { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - unconfigure(self, surface.vulkan.as_ref().unwrap(), &present); + unconfigure(self, &surface.vulkan, &present); #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - unconfigure(self, surface.metal.as_ref().unwrap(), &present); + unconfigure(self, &surface.metal, &present); #[cfg(all(feature = "dx12", windows))] - unconfigure(self, surface.dx12.as_ref().unwrap(), &present); + unconfigure(self, &surface.dx12, &present); #[cfg(all(feature = "dx11", windows))] - unconfigure(self, surface.dx11.as_ref().unwrap(), &present); + unconfigure(self, &surface.dx11, &present); #[cfg(feature = "gles")] - unconfigure(self, surface.gl.as_ref().unwrap(), &present); + unconfigure(self, &surface.gl, &present); } self.instance.destroy_surface(surface); diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index d351f8d9ab..43dc04efe4 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -1,11 +1,11 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, + binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError, PipelineLayout}, command::ColorAttachmentError, device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, hal_api::HalApi, - id::{ComputePipelineId, PipelineLayoutId, RenderPipelineId, ShaderModuleId, Valid}, + id::{ComputePipelineId, PipelineLayoutId, RenderPipelineId, ShaderModuleId}, resource::{Resource, ResourceInfo}, validation, Label, }; @@ -244,7 +244,7 @@ pub enum CreateComputePipelineError { #[derive(Debug)] pub struct ComputePipeline { pub(crate) raw: Option, - pub(crate) layout_id: Valid, + pub(crate) layout: Arc>, pub(crate) device: Arc>, pub(crate) late_sized_buffer_groups: ArrayVec, pub(crate) info: ResourceInfo, @@ -471,8 +471,8 @@ impl Default for VertexStep { #[derive(Debug)] pub struct RenderPipeline { pub(crate) raw: Option, - pub(crate) layout_id: Valid, pub(crate) device: Arc>, + pub(crate) layout: Arc>, pub(crate) pass_context: RenderPassContext, pub(crate) flags: PipelineFlags, pub(crate) strip_index_format: Option, diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 197cd3bb5d..a9e64e613b 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -127,6 +127,9 @@ pub trait Resource { fn is_unique(self: &Arc) -> bool { self.ref_count() == 1 } + 
fn is_equal(&self, other: &Self) -> bool { + self.as_info().id().0.unzip() == other.as_info().id().0.unzip() + } } /// The status code provided to the buffer mapping callback. diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 87a19faa67..b2053a9df0 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -122,7 +122,7 @@ impl> StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { + pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a Arc> { let resource = storage.get(id).ok()?; let (index32, _epoch, _) = id.unzip(); From b9b1556b830993d7c03ddc1ce77e11a4aedef872 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 17:21:44 +0200 Subject: [PATCH 085/132] Removing valid and adding is_valid --- wgpu-core/src/binding_model.rs | 6 +-- wgpu-core/src/command/bind.rs | 10 ++--- wgpu-core/src/command/bundle.rs | 8 ++-- wgpu-core/src/command/clear.rs | 14 +++---- wgpu-core/src/command/compute.rs | 6 +-- wgpu-core/src/command/memory_init.rs | 8 ++-- wgpu-core/src/command/render.rs | 20 +++++----- wgpu-core/src/command/transfer.rs | 4 +- wgpu-core/src/device/global.rs | 46 +++++++++++------------ wgpu-core/src/device/life.rs | 56 ++++++++++++++-------------- wgpu-core/src/device/queue.rs | 28 +++++++------- wgpu-core/src/device/resource.rs | 30 +++++++-------- wgpu-core/src/id.rs | 17 ++++----- wgpu-core/src/instance.rs | 36 +++++++++--------- wgpu-core/src/pipeline.rs | 2 +- wgpu-core/src/present.rs | 18 ++++----- wgpu-core/src/registry.rs | 4 +- wgpu-core/src/resource.rs | 14 +++---- wgpu-core/src/storage.rs | 6 +-- wgpu-core/src/track/buffer.rs | 24 ++++++------ wgpu-core/src/track/metadata.rs | 2 +- wgpu-core/src/track/stateless.rs | 18 ++++----- wgpu-core/src/track/texture.rs | 24 ++++++------ 23 files changed, 199 insertions(+), 202 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 64a909291d..13004007c1 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -4,7 +4,7 @@ use crate::{ hal_api::HalApi, id::{ BindGroupId, BindGroupLayoutId, BufferId, PipelineLayoutId, SamplerId, TextureId, - TextureViewId, Valid, + TextureViewId, }, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, resource::{Resource, ResourceInfo}, @@ -519,8 +519,8 @@ pub(crate) fn try_get_bind_group_layout( pub(crate) fn get_bind_group_layout( layouts: &BindGroupLayouts, - id: Valid, -) -> (Valid, &Arc>) { + id: BindGroupLayoutId, +) -> (BindGroupLayoutId, &Arc>) { let layout = &layouts[id]; layout .compatible_layout diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index 862b6efe4e..331f2a56e9 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -4,7 +4,7 @@ use crate::{ binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout}, device::SHADER_STAGE_COUNT, hal_api::HalApi, - id::{BindGroupId, Valid}, + id::BindGroupId, pipeline::LateSizedBufferGroup, }; @@ -137,7 +137,7 @@ struct LateBufferBinding { #[derive(Debug, Default)] pub(super) struct EntryPayload { - pub(super) group_id: Option>, + pub(super) group_id: Option, pub(super) dynamic_offsets: Vec, late_buffer_bindings: Vec, /// Since `LateBufferBinding` may contain information about the bindings @@ -221,12 +221,12 @@ impl Binder { 
pub(super) fn assign_group<'a>( &'a mut self, index: usize, - bind_group_id: Valid, + bind_group_id: BindGroupId, bind_group: &BindGroup, offsets: &[wgt::DynamicOffset], ) -> &'a [EntryPayload] { log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id); - debug_assert_eq!(A::VARIANT, bind_group_id.0.backend()); + debug_assert_eq!(A::VARIANT, bind_group_id.backend()); let payload = &mut self.payloads[index]; payload.group_id = Some(bind_group_id); @@ -257,7 +257,7 @@ impl Binder { &self.payloads[bind_range] } - pub(super) fn list_active(&self) -> impl Iterator> + '_ { + pub(super) fn list_active(&self) -> impl Iterator + '_ { let payloads = &self.payloads; self.manager .list_active() diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index f7a7a39a72..64e66ea6df 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -668,9 +668,9 @@ impl RenderBundleEncoder { fn check_valid_to_use( &self, - device_id: id::Valid, + device_id: id::DeviceId, ) -> Result<(), RenderBundleErrorInner> { - if device_id.0 != self.parent_id { + if device_id != self.parent_id { return Err(RenderBundleErrorInner::NotValidToUse); } @@ -1220,7 +1220,7 @@ impl State { /// Return the id of the current pipeline, if any. fn pipeline_id(&self) -> Option { - self.pipeline.as_ref().map(|p| p.pipeline.as_info().id().0) + self.pipeline.as_ref().map(|p| p.pipeline.as_info().id()) } /// Return the current pipeline state. Return an error if none is set. @@ -1378,7 +1378,7 @@ impl State { let offsets = &contents.dynamic_offsets; return Some(RenderCommand::SetBindGroup { index: i.try_into().unwrap(), - bind_group_id: contents.bind_group.as_info().id().0, + bind_group_id: contents.bind_group.as_info().id(), num_dynamic_offsets: (offsets.end - offsets.start) as u8, }); } diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index f682ada924..693882a6a9 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -7,7 +7,7 @@ use crate::{ get_lowest_common_denom, global::Global, hal_api::HalApi, - id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, DeviceId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange}, resource::{Texture, TextureClearMode}, @@ -224,7 +224,7 @@ impl Global { let texture_guard = hub.textures.read(); clear_texture( &*texture_guard, - Valid(dst), + dst, TextureInitRange { mip_range: subresource_mip_range, layer_range: subresource_layer_range, @@ -239,7 +239,7 @@ impl Global { pub(crate) fn clear_texture( storage: &Storage, TextureId>, - dst_texture_id: Valid, + dst_texture_id: TextureId, range: TextureInitRange, encoder: &mut A::CommandEncoder, texture_tracker: &mut TextureTracker, @@ -253,7 +253,7 @@ pub(crate) fn clear_texture( .as_ref() .unwrap() .as_raw() - .ok_or(ClearError::InvalidTexture(dst_texture_id.0))?; + .ok_or(ClearError::InvalidTexture(dst_texture_id))?; // Issue the right barrier. let clear_usage = match *dst_texture.clear_mode.read() { @@ -265,7 +265,7 @@ pub(crate) fn clear_texture( hal::TextureUses::COLOR_TARGET } TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0)); + return Err(ClearError::NoValidTextureClearMode(dst_texture_id)); } }; @@ -288,7 +288,7 @@ pub(crate) fn clear_texture( // clear_texture api in order to remove this check and call the cheaper // change_replace_tracked whenever possible. 
let dst_barrier = texture_tracker - .set_single(dst_texture, dst_texture_id.0, selector, clear_usage) + .set_single(dst_texture, dst_texture_id, selector, clear_usage) .unwrap() .map(|pending| pending.into_hal(dst_texture)); unsafe { @@ -312,7 +312,7 @@ pub(crate) fn clear_texture( clear_texture_via_render_passes(dst_texture, range, is_color, encoder)? } TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0)); + return Err(ClearError::NoValidTextureClearMode(dst_texture_id)); } } Ok(()) diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 0dfbb29fb8..96fb09daf7 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -308,7 +308,7 @@ impl State { bind_group_guard: &Storage, id::BindGroupId>, buffer_guard: &Storage, id::BufferId>, texture_guard: &Storage, id::TextureId>, - indirect_buffer: Option>, + indirect_buffer: Option, ) -> Result<(), UsageConflict> { for id in self.binder.list_active() { unsafe { @@ -531,7 +531,7 @@ impl Global { let pipeline_layout = pipeline_layout.as_ref().unwrap().clone(); let entries = state.binder.assign_group( index as usize, - id::Valid(bind_group_id), + bind_group_id, bind_group, &temp_offsets, ); @@ -744,7 +744,7 @@ impl Global { &*bind_group_guard, &*buffer_guard, &*texture_guard, - Some(id::Valid(buffer_id)), + Some(buffer_id), ) .map_pass_err(scope)?; unsafe { diff --git a/wgpu-core/src/command/memory_init.rs b/wgpu-core/src/command/memory_init.rs index 88d2da14b4..dd70628479 100644 --- a/wgpu-core/src/command/memory_init.rs +++ b/wgpu-core/src/command/memory_init.rs @@ -111,13 +111,13 @@ impl CommandBufferTextureMemoryActions { // implicit init, not requiring any immediate resource init. pub(crate) fn register_implicit_init( &mut self, - id: id::Valid, + id: TextureId, range: TextureInitRange, texture_guard: &Storage, TextureId>, ) { let must_be_empty = self.register_init_action( &TextureInitTrackerAction { - id: id.0, + id, range, kind: MemoryInitKind::ImplicitlyInitialized, }, @@ -144,7 +144,7 @@ pub(crate) fn fixup_discarded_surfaces< for init in inits { clear_texture( texture_guard, - id::Valid(init.texture), + init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), @@ -308,7 +308,7 @@ impl BakedCommands { for range in ranges.drain(..) 
{ clear_texture( texture_guard, - id::Valid(texture_use.id), + texture_use.id, range, &mut self.encoder, &mut device_tracker.textures, diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 6aad63d3f3..9c30983f11 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -316,7 +316,7 @@ impl OptionalState { #[derive(Debug, Default)] struct IndexState { - bound_buffer_view: Option<(id::Valid, Range)>, + bound_buffer_view: Option<(id::BufferId, Range)>, format: Option, pipeline_format: Option, limit: u32, @@ -684,7 +684,7 @@ where } struct RenderAttachment<'a> { - texture_id: &'a id::Valid, + texture_id: &'a id::TextureId, selector: &'a TextureSelector, usage: hal::TextureUses, } @@ -728,7 +728,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if channel.load_op == LoadOp::Load { pending_discard_init_fixups.extend(texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id.0, + id: view.parent_id, range: TextureInitRange::from(view.selector.clone()), // Note that this is needed even if the target is discarded, kind: MemoryInitKind::NeedsInitializedMemory, @@ -748,7 +748,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // discard right away be alright since the texture can't be used // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { - texture: view.parent_id.0, + texture: view.parent_id, mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -920,7 +920,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { pending_discard_init_fixups.extend( texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id.0, + id: view.parent_id, range: TextureInitRange::from(view.selector.clone()), kind: MemoryInitKind::NeedsInitializedMemory, }, @@ -956,7 +956,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if at.depth.store_op == StoreOp::Discard { // Both are discarded using the regular path. 
discarded_surfaces.push(TextureSurfaceDiscard { - texture: view.parent_id.0, + texture: view.parent_id, mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -1206,7 +1206,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } for ra in self.render_attachments { - if !texture_guard.contains(ra.texture_id.0) { + if !texture_guard.contains(*ra.texture_id) { return Err(RenderPassErrorInner::SurfaceTextureDropped); } let texture = &texture_guard[*ra.texture_id]; @@ -1461,7 +1461,7 @@ impl Global { let pipeline_layout = state.binder.pipeline_layout.clone(); let entries = state.binder.assign_group( index as usize, - id::Valid(bind_group_id), + bind_group_id, bind_group, &temp_offsets, ); @@ -1620,7 +1620,7 @@ impl Global { Some(s) => offset + s.get(), None => buffer.size, }; - state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end)); + state.index.bound_buffer_view = Some((buffer_id, offset..end)); state.index.format = Some(index_format); state.index.update_limit(); @@ -2304,7 +2304,7 @@ impl Global { .reset_queries( transit, &query_set_guard, - cmd_buf.device.info.id().0.backend(), + cmd_buf.device.info.id().backend(), ) .map_err(RenderCommandError::InvalidQuerySet) .map_pass_err(PassErrorScope::QueryReset)?; diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 0227bc712b..2a4a790783 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -7,7 +7,7 @@ use crate::{ error::{ErrorFormatter, PrettyError}, global::Global, hal_api::HalApi, - id::{BufferId, CommandEncoderId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, @@ -466,7 +466,7 @@ fn handle_texture_init( for init in immediate_inits { clear_texture( texture_guard, - Valid(init.texture), + init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 02b4327423..7ef51165d3 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -249,7 +249,7 @@ impl Global { .buffers .insert_single(id, resource, buffer_use); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -608,7 +608,7 @@ impl Global { array_layer_count: Some(1), }, }; - let view = device.create_texture_view(&resource, id.0, &desc).unwrap(); + let view = device.create_texture_view(&resource, id, &desc).unwrap(); clear_views.push(Arc::new(view)); } } @@ -620,12 +620,12 @@ impl Global { } device.trackers.lock().textures.insert_single( - id.0, + id, resource, hal::TextureUses::UNINITIALIZED, ); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -690,12 +690,12 @@ impl Global { log::info!("Created Texture {:?} with {:?}", id, desc); device.trackers.lock().textures.insert_single( - id.0, + id, resource, hal::TextureUses::UNINITIALIZED, ); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -744,7 +744,7 @@ impl Global { .buffers .insert_single(id, buffer, hal::BufferUses::empty()); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -904,7 +904,7 @@ impl Global { let (id, resource) = fid.assign(view); log::info!("Created TextureView {:?}", id); 
device.trackers.lock().views.insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -989,7 +989,7 @@ impl Global { log::info!("Created Sampler {:?}", id); device.trackers.lock().samplers.insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1096,7 +1096,7 @@ impl Global { let (id, _) = fid.assign(layout); log::info!("Created BindGroupLayout {:?}", id); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1170,7 +1170,7 @@ impl Global { let (id, _) = fid.assign(layout); log::info!("Created PipelineLayout {:?}", id); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1254,7 +1254,7 @@ impl Global { .lock() .bind_groups .insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1341,7 +1341,7 @@ impl Global { }; let (id, _) = fid.assign(shader); log::info!("Created ShaderModule {:?} with {:?}", id, desc); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1392,7 +1392,7 @@ impl Global { }; let (id, _) = fid.assign(shader); log::info!("Created ShaderModule {:?} with {:?}", id, desc); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1451,7 +1451,7 @@ impl Global { let (id, _) = fid.assign(command_buffer); log::info!("Created CommandBuffer {:?} with {:?}", id, desc); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1541,7 +1541,7 @@ impl Global { let (id, resource) = fid.assign(render_bundle); log::info!("Created RenderBundle {:?}", id); device.trackers.lock().bundles.insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1614,7 +1614,7 @@ impl Global { .query_sets .insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(""); @@ -1697,7 +1697,7 @@ impl Global { .render_pipelines .insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1727,7 +1727,7 @@ impl Global { None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; - return (id.0, None); + return (id, None); }; let id = hub @@ -1760,7 +1760,7 @@ impl Global { } } }; - let layout_id = layout.as_info().id().0; + let layout_id = layout.as_info().id(); let device = &pipeline.device; let mut life_lock = device.lock_life(); life_lock @@ -1818,7 +1818,7 @@ impl Global { .lock() .compute_pipelines .insert_single(id, resource); - return (id.0, None); + return (id, None); }; let id = fid.assign_error(desc.label.borrow_or_default()); @@ -1851,7 +1851,7 @@ impl Global { None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; - return (layout.as_info().id().0, None); + return (layout.as_info().id(), None); }; let id = hub @@ -1890,7 +1890,7 @@ impl Global { .suspected_resources .compute_pipelines .insert(compute_pipeline_id, pipeline.clone()); - let layout_id = layout.as_info().id().0; + let layout_id = layout.as_info().id(); life_lock .suspected_resources .pipeline_layouts diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 
78266a0ece..0cfdc4d533 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -363,12 +363,12 @@ impl LifetimeTracker { for v in self.future_suspected_buffers.drain(..).take(1) { self.suspected_resources .buffers - .insert(v.as_info().id().0, v); + .insert(v.as_info().id(), v); } for v in self.future_suspected_textures.drain(..).take(1) { self.suspected_resources .textures - .insert(v.as_info().id().0, v); + .insert(v.as_info().id(), v); } } @@ -490,31 +490,31 @@ impl LifetimeTracker { log::info!("Bundle {:?} is removed from registry", id); f(bundle_id); - if let Some(res) = hub.render_bundles.unregister(id.0) { + if let Some(res) = hub.render_bundles.unregister(id) { for v in res.used.buffers.used_resources() { self.suspected_resources .buffers - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.textures.used_resources() { self.suspected_resources .textures - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.bind_groups.used_resources() { self.suspected_resources .bind_groups - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.render_pipelines.used_resources() { self.suspected_resources .render_pipelines - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.query_sets.used_resources() { self.suspected_resources .query_sets - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } } } @@ -545,31 +545,31 @@ impl LifetimeTracker { log::info!("BindGroup {:?} is removed from registry", id); f(bind_group_id); - if let Some(res) = hub.bind_groups.unregister(id.0) { + if let Some(res) = hub.bind_groups.unregister(id) { for v in res.used.buffers.used_resources() { self.suspected_resources .buffers - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.textures.used_resources() { self.suspected_resources .textures - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.views.used_resources() { self.suspected_resources .texture_views - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } for v in res.used.samplers.used_resources() { self.suspected_resources .samplers - .insert(v.as_info().id().0, v.clone()); + .insert(v.as_info().id(), v.clone()); } self.suspected_resources .bind_group_layouts - .insert(res.layout.as_info().id().0, res.layout.clone()); + .insert(res.layout.as_info().id(), res.layout.clone()); let submit_index = res.info.submission_index(); self.active @@ -607,11 +607,11 @@ impl LifetimeTracker { log::info!("TextureView {:?} is removed from registry", id); f(view_id); - if let Some(res) = hub.texture_views.unregister(id.0) { + if let Some(res) = hub.texture_views.unregister(id) { if let Some(parent_texture) = res.parent.as_ref() { self.suspected_resources .textures - .insert(parent_texture.as_info().id().0, parent_texture.clone()); + .insert(parent_texture.as_info().id(), parent_texture.clone()); } let submit_index = res.info.submission_index(); self.active @@ -649,7 +649,7 @@ impl LifetimeTracker { log::info!("Texture {:?} is removed from registry", id); f(texture_id); - if let Some(res) = hub.textures.unregister(id.0) { + if let Some(res) = hub.textures.unregister(id) { let submit_index = res.info.submission_index(); let non_referenced_resources = self .active @@ -695,7 +695,7 @@ impl LifetimeTracker { 
log::info!("Sampler {:?} is removed from registry", id); f(sampler_id); - if let Some(res) = hub.samplers.unregister(id.0) { + if let Some(res) = hub.samplers.unregister(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -732,7 +732,7 @@ impl LifetimeTracker { log::info!("Buffer {:?} is removed from registry", id); f(buffer_id); - if let Some(res) = hub.buffers.unregister(id.0) { + if let Some(res) = hub.buffers.unregister(id) { let submit_index = res.info.submission_index(); if let resource::BufferMapState::Init { ref stage_buffer, .. @@ -774,7 +774,7 @@ impl LifetimeTracker { log::info!("ComputePipeline {:?} is removed from registry", id); f(compute_pipeline_id); - if let Some(res) = hub.compute_pipelines.unregister(id.0) { + if let Some(res) = hub.compute_pipelines.unregister(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -812,7 +812,7 @@ impl LifetimeTracker { log::info!("RenderPipeline {:?} is removed from registry", id); f(render_pipeline_id); - if let Some(res) = hub.render_pipelines.unregister(id.0) { + if let Some(res) = hub.render_pipelines.unregister(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -838,18 +838,18 @@ impl LifetimeTracker { .retain(|pipeline_layout_id, pipeline_layout| { let id = pipeline_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed - if pipeline_layouts_locked.is_unique(id.0).unwrap() { + if pipeline_layouts_locked.is_unique(id).unwrap() { log::debug!("PipelineLayout {:?} will be removed from registry", id); f(pipeline_layout_id); if let Some(lay) = hub .pipeline_layouts - .unregister_locked(id.0, &mut *pipeline_layouts_locked) + .unregister_locked(id, &mut *pipeline_layouts_locked) { for bgl in &lay.bind_group_layouts { self.suspected_resources .bind_group_layouts - .insert(bgl.as_info().id().0, bgl.clone()); + .insert(bgl.as_info().id(), bgl.clone()); } self.free_resources.pipeline_layouts.push(lay); } @@ -876,7 +876,7 @@ impl LifetimeTracker { // encounter could drop the refcount to 0. //Note: this has to happen after all the suspected pipelines are destroyed - if bind_group_layouts_locked.is_unique(id.0).unwrap() { + if bind_group_layouts_locked.is_unique(id).unwrap() { // If This layout points to a compatible one, go over the latter // to decrement the ref count and potentially destroy it. 
//bgl_to_check = bind_group_layout.compatible_layout; @@ -920,8 +920,8 @@ impl LifetimeTracker { if is_removed { log::info!("QuerySet {:?} is removed from registry", id); // #[cfg(feature = "trace")] - // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id.0))); - if let Some(res) = hub.query_sets.unregister(id.0) { + // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); + if let Some(res) = hub.query_sets.unregister(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -1099,7 +1099,7 @@ impl LifetimeTracker { if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::info!("Buffer {:?} is removed from registry", buffer_id); - if let Some(buf) = hub.buffers.unregister(buffer_id.0) { + if let Some(buf) = hub.buffers.unregister(buffer_id) { self.free_resources.buffers.push(buf); } } else { diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index b150511fec..59487cb57c 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -475,7 +475,7 @@ impl Global { let (id, _) = fid.assign(staging_buffer); log::info!("Created StagingBuffer {:?}", id); - Ok((id.0, staging_buffer_ptr)) + Ok((id, staging_buffer_ptr)) } pub fn queue_write_staging_buffer( @@ -777,7 +777,7 @@ impl Global { let texture_guard = hub.textures.read(); crate::command::clear_texture( &*texture_guard, - id::Valid(destination.texture), + destination.texture, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, @@ -1200,7 +1200,7 @@ impl Global { let raw_buf = match buffer.raw { Some(ref raw) => raw, None => { - return Err(QueueSubmitError::DestroyedBuffer(id.0)); + return Err(QueueSubmitError::DestroyedBuffer(id)); } }; buffer.info.use_at(submit_index); @@ -1215,11 +1215,11 @@ impl Global { .temp_suspected .lock() .buffers - .insert(id.0, buffer.clone()); + .insert(id, buffer.clone()); } else { match *buffer.map_state.lock() { BufferMapState::Idle => (), - _ => return Err(QueueSubmitError::BufferStillMapped(id.0)), + _ => return Err(QueueSubmitError::BufferStillMapped(id)), } } } @@ -1227,7 +1227,7 @@ impl Global { let id = texture.info.id(); let should_extend = match *texture.inner.as_ref().unwrap() { TextureInner::Native { raw: None } => { - return Err(QueueSubmitError::DestroyedTexture(id.0)); + return Err(QueueSubmitError::DestroyedTexture(id)); } TextureInner::Native { raw: Some(_) } => false, TextureInner::Surface { ref has_work, .. 
} => { @@ -1241,7 +1241,7 @@ impl Global { .temp_suspected .lock() .textures - .insert(id.0, texture.clone()); + .insert(id, texture.clone()); } if should_extend { unsafe { @@ -1261,7 +1261,7 @@ impl Global { texture_view.info.use_at(submit_index); if texture_view.is_unique() { device.temp_suspected.lock().texture_views.insert( - texture_view.as_info().id().0, + texture_view.as_info().id(), texture_view.clone(), ); } @@ -1286,7 +1286,7 @@ impl Global { .temp_suspected .lock() .bind_groups - .insert(bg.as_info().id().0, bg.clone()); + .insert(bg.as_info().id(), bg.clone()); } } } @@ -1297,7 +1297,7 @@ impl Global { compute_pipeline.info.use_at(submit_index); if compute_pipeline.is_unique() { device.temp_suspected.lock().compute_pipelines.insert( - compute_pipeline.as_info().id().0, + compute_pipeline.as_info().id(), compute_pipeline.clone(), ); } @@ -1308,7 +1308,7 @@ impl Global { render_pipeline.info.use_at(submit_index); if render_pipeline.is_unique() { device.temp_suspected.lock().render_pipelines.insert( - render_pipeline.as_info().id().0, + render_pipeline.as_info().id(), render_pipeline.clone(), ); } @@ -1320,7 +1320,7 @@ impl Global { .temp_suspected .lock() .query_sets - .insert(query_set.as_info().id().0, query_set.clone()); + .insert(query_set.as_info().id(), query_set.clone()); } } for bundle in cmd_buf_trackers.bundles.used_resources() { @@ -1340,7 +1340,7 @@ impl Global { .temp_suspected .lock() .render_bundles - .insert(bundle.as_info().id().0, bundle.clone()); + .insert(bundle.as_info().id(), bundle.clone()); } } } @@ -1431,7 +1431,7 @@ impl Global { used_surface_textures .merge_single( &*texture_guard, - id::Valid(id), + id, None, hal::TextureUses::PRESENT, ) diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index b01549d888..9666ba4774 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -365,56 +365,56 @@ impl Device { if resource.is_unique() { temp_suspected .buffers - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.textures.used_resources() { if resource.is_unique() { temp_suspected .textures - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.views.used_resources() { if resource.is_unique() { temp_suspected .texture_views - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.bind_groups.used_resources() { if resource.is_unique() { temp_suspected .bind_groups - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.samplers.used_resources() { if resource.is_unique() { temp_suspected .samplers - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.compute_pipelines.used_resources() { if resource.is_unique() { temp_suspected .compute_pipelines - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.render_pipelines.used_resources() { if resource.is_unique() { temp_suspected .render_pipelines - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.query_sets.used_resources() { if resource.is_unique() { temp_suspected 
.query_sets - .insert(resource.as_info().id().0, resource.clone()); + .insert(resource.as_info().id(), resource.clone()); } } } @@ -1042,7 +1042,7 @@ impl Device { Ok(TextureView { raw: Some(raw), parent: None, - parent_id: id::Valid(texture_id), + parent_id: texture_id, device: self.clone(), desc: resource::HalTextureViewDescriptor { format: resolved_format, @@ -1380,7 +1380,7 @@ impl Device { guard .iter(self_id.backend()) .find(|&(_, bgl)| { - bgl.device.info.id().0 == self_id + bgl.device.info.id() == self_id && bgl.compatible_layout.is_none() && bgl.entries == *entry_map }) @@ -1781,17 +1781,17 @@ impl Device { .textures .add_single( texture_guard, - view.parent_id.0, + view.parent_id, Some(view.selector.clone()), internal_use, ) .ok_or(binding_model::CreateBindGroupError::InvalidTexture( - view.parent_id.0, + view.parent_id, ))?; check_texture_usage(texture.desc.usage, pub_usage)?; used_texture_ranges.push(TextureInitTrackerAction { - id: view.parent_id.0, + id: view.parent_id, range: TextureInitRange { mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), layer_range: view @@ -2314,7 +2314,7 @@ impl Device { .bind_group_layouts .iter() .map(|&id| { - let (_, layout) = get_bind_group_layout(bgl_guard, id::Valid(id)); + let (_, layout) = get_bind_group_layout(bgl_guard, id); layout.clone() }) .collect(), @@ -2349,7 +2349,7 @@ impl Device { } for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { - match Device::deduplicate_bind_group_layout(self.info.id().0, &map, bgl_guard) { + match Device::deduplicate_bind_group_layout(self.info.id(), &map, bgl_guard) { Some((dedup_id, _)) => { *bgl_id = dedup_id; } diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 07beb89499..5d33309096 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -101,8 +101,13 @@ impl Id { } #[allow(dead_code)] - pub(crate) fn dummy(index: u32) -> Valid { - Valid(Id::zip(index, 1, Backend::Empty)) + pub(crate) fn dummy(index: u32) -> Self { + Id::zip(index, 1, Backend::Empty) + } + + #[allow(dead_code)] + pub(crate) fn is_valid(&self) -> bool { + self.backend() != Backend::Empty } pub fn backend(self) -> Backend { @@ -158,14 +163,6 @@ impl Ord for Id { } } -/// An internal ID that has been checked to point to -/// a valid object in the storages. -#[repr(transparent)] -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "trace", derive(serde::Serialize))] -#[cfg_attr(feature = "replay", derive(serde::Deserialize))] -pub(crate) struct Valid(pub I); - /// Trait carrying methods for direct `Id` access. /// /// Most `wgpu-core` clients should not use this trait. 
Unusual clients that diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 356e7c9b1d..d1fd9686f5 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -506,7 +506,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - id.0 + id } /// # Safety @@ -537,7 +537,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - id.0 + id } #[cfg(all( @@ -568,7 +568,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - Ok(id.0) + Ok(id) } #[cfg(all( @@ -599,7 +599,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - Ok(id.0) + Ok(id) } #[cfg(all(feature = "dx12", windows))] @@ -627,7 +627,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - id.0 + id } #[cfg(all(feature = "dx12", windows))] @@ -655,7 +655,7 @@ impl Global { }; let (id, _) = self.surfaces.prepare(id_in).assign(surface); - id.0 + id } pub fn surface_drop(&self, id: SurfaceId) { @@ -719,7 +719,7 @@ impl Global { let adapter = Adapter::new(raw); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); let (id, _) = hub.adapters.prepare(id_backend.clone()).assign(adapter); - list.push(id.0); + list.push(id); } } @@ -770,7 +770,7 @@ impl Global { .adapters .prepare(new_id.unwrap()) .assign(adapter); - Some(id.0) + Some(id) } } } @@ -964,15 +964,15 @@ impl Global { let id = match A::VARIANT { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)).0 .0, + Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)) .0, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] Backend::Metal => fid.assign(Adapter::new(hal_adapter)).0 .0, #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)).0 .0, + Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)) .0, #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)).0 .0, + Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)) .0, #[cfg(feature = "gles")] - Backend::Gl => fid.assign(Adapter::new(hal_adapter)).0 .0, + Backend::Gl => fid.assign(Adapter::new(hal_adapter)) .0, _ => unreachable!(), }; log::info!("Created Adapter {:?}", id); @@ -1095,15 +1095,15 @@ impl Global { let (device_id, _) = device_fid.assign(device); log::info!("Created Device {:?}", device_id); - let device = hub.devices.get(device_id.0).unwrap(); + let device = hub.devices.get(device_id).unwrap(); queue.device = Some(device.clone()); let (queue_id, _) = queue_fid.assign(queue); log::info!("Created Queue {:?}", queue_id); - device.queue_id.write().replace(queue_id.0); + device.queue_id.write().replace(queue_id); - return (device_id.0, queue_id.0, None); + return (device_id, queue_id, None); }; let device_id = device_fid.assign_error(desc.label.borrow_or_default()); @@ -1143,15 +1143,15 @@ impl Global { let (device_id, _) = devices_fid.assign(device); log::info!("Created Device {:?}", device_id); - let device = hub.devices.get(device_id.0).unwrap(); + let device = hub.devices.get(device_id).unwrap(); queue.device = Some(device.clone()); let (queue_id, _) = queues_fid.assign(queue); log::info!("Created Queue {:?}", queue_id); - device.queue_id.write().replace(queue_id.0); + device.queue_id.write().replace(queue_id); - return (device_id.0, queue_id.0, None); + return (device_id, queue_id, None); }; let device_id = devices_fid.assign_error(desc.label.borrow_or_default()); diff --git 
a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 43dc04efe4..2ec01dc287 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -58,7 +58,7 @@ impl Drop for ShaderModule { if let Some(raw) = self.raw.take() { #[cfg(feature = "trace")] if let Some(ref mut trace) = *self.device.trace.lock() { - trace.add(trace::Action::DestroyShaderModule(self.info.id().0)); + trace.add(trace::Action::DestroyShaderModule(self.info.id())); } unsafe { use hal::Device; diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index d8b6fc356d..b217bd05a3 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -25,7 +25,7 @@ use crate::{ device::{DeviceError, MissingDownlevelFlags}, global::Global, hal_api::HalApi, - id::{SurfaceId, TextureId, Valid}, + id::{SurfaceId, TextureId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, resource::{self, ResourceInfo}, @@ -46,7 +46,7 @@ pub(crate) struct Presentation { pub(crate) config: wgt::SurfaceConfiguration>, #[allow(unused)] pub(crate) num_frames: u32, - pub(crate) acquired_texture: Option>, + pub(crate) acquired_texture: Option, } #[derive(Clone, Debug, Error)] @@ -193,7 +193,7 @@ impl Global { let texture = resource::Texture { inner: Some(resource::TextureInner::Surface { raw: ast.texture, - parent_id: Valid(surface_id), + parent_id: surface_id, has_work: AtomicBool::new(false), }), device: device.clone(), @@ -218,7 +218,7 @@ impl Global { // register it in the device tracker as uninitialized let mut trackers = device.trackers.lock(); trackers.textures.insert_single( - id.0, + id, resource, hal::TextureUses::UNINITIALIZED, ); @@ -234,7 +234,7 @@ impl Global { } else { Status::Good }; - (Some(id.0), status) + (Some(id), status) } Ok(None) => (None, Status::Timeout), Err(err) => ( @@ -298,7 +298,7 @@ impl Global { ); device.trackers.lock().textures.remove(texture_id); - let texture = hub.textures.unregister(texture_id.0); + let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { if let Ok(mut texture) = Arc::try_unwrap(texture) { let mut clear_mode = texture.clear_mode.write(); @@ -321,7 +321,7 @@ impl Global { parent_id, has_work, } => { - if surface_id != parent_id.0 { + if surface_id != parent_id { log::error!("Presented frame is from a different surface"); Err(hal::SurfaceError::Lost) } else if !has_work.load(Ordering::Relaxed) { @@ -397,7 +397,7 @@ impl Global { // and now we are moving it away. 
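// (With the registry handing out `Arc`s, `unregister` below returns the `Arc`
//  itself; `Arc::try_unwrap` then only yields the texture when presentation held
//  the last strong reference, which is what allows the raw surface frame to be
//  reclaimed here.)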
device.trackers.lock().textures.remove(texture_id); - let texture = hub.textures.unregister(texture_id.0); + let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { if let Ok(mut texture) = Arc::try_unwrap(texture) { let suf = A::get_surface(&surface); @@ -407,7 +407,7 @@ impl Global { parent_id, has_work: _, } => { - if surface_id == parent_id.0 { + if surface_id == parent_id { unsafe { suf.unwrap().raw.discard_texture(raw) }; } else { log::warn!("Surface texture is outdated"); diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 59bcb7f63a..0d2db72d67 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -58,11 +58,11 @@ impl> FutureId<'_, I, T> { self.id } - pub fn assign(self, mut value: T) -> (id::Valid, Arc) { + pub fn assign(self, mut value: T) -> (I, Arc) { value.as_info_mut().set_id(self.id); self.data.write().insert(self.id, Arc::new(value)); ( - id::Valid(self.id), + self.id, self.data.read().get(self.id).unwrap().clone(), ) } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index a9e64e613b..4632473092 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -4,7 +4,7 @@ use crate::{ hal_api::HalApi, id::{ AdapterId, BufferId, DeviceId, QuerySetId, SamplerId, StagingBufferId, SurfaceId, - TextureId, TextureViewId, TypedId, Valid, + TextureId, TextureViewId, TypedId, }, identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTracker, TextureInitTracker}, @@ -49,7 +49,7 @@ use std::{ /// [`Buffer`]: crate::resource::Buffer #[derive(Debug)] pub struct ResourceInfo { - id: Option>, + id: Option, /// The index of the last queue submission in which the resource /// was used. /// @@ -91,12 +91,12 @@ impl ResourceInfo { label } - pub(crate) fn id(&self) -> Valid { + pub(crate) fn id(&self) -> Id { self.id.unwrap() } pub(crate) fn set_id(&mut self, id: Id) { - self.id = Some(Valid(id)); + self.id = Some(id); } /// Record that this resource will be used by the queue submission with the @@ -128,7 +128,7 @@ pub trait Resource { self.ref_count() == 1 } fn is_equal(&self, other: &Self) -> bool { - self.as_info().id().0.unzip() == other.as_info().id().0.unzip() + self.as_info().id().unzip() == other.as_info().id().unzip() } } @@ -517,7 +517,7 @@ pub(crate) enum TextureInner { }, Surface { raw: A::SurfaceTexture, - parent_id: Valid, + parent_id: SurfaceId, has_work: AtomicBool, }, } @@ -857,7 +857,7 @@ pub struct TextureView { pub(crate) parent: Option>>, // The parent's refcount is held alive, but the parent may still be deleted // if it's a surface texture. TODO: make this cleaner. - pub(crate) parent_id: Valid, + pub(crate) parent_id: TextureId, pub(crate) device: Arc>, //TODO: store device_id for quick access? 
pub(crate) desc: HalTextureViewDescriptor, diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index af82909586..dbf85482df 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -54,14 +54,14 @@ where _phantom: PhantomData, } -impl ops::Index> for Storage +impl ops::Index for Storage where T: Resource, I: id::TypedId, { type Output = Arc; - fn index(&self, id: id::Valid) -> &Arc { - self.get(id.0).unwrap() + fn index(&self, id: I) -> &Arc { + self.get(id).unwrap() } } impl Storage diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 38b527a25f..edffa0ed7b 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -10,7 +10,7 @@ use std::{borrow::Cow, marker::PhantomData, sync::Arc, vec::Drain}; use super::PendingTransition; use crate::{ hal_api::HalApi, - id::{BufferId, TypedId, Valid}, + id::{BufferId, TypedId}, resource::Buffer, storage::Storage, track::{ @@ -43,7 +43,7 @@ impl ResourceUses for BufferUses { /// Stores all the buffers that a bind group stores. #[derive(Debug)] pub(crate) struct BufferBindGroupState { - buffers: Vec<(Valid, Arc>, BufferUses)>, + buffers: Vec<(BufferId, Arc>, BufferUses)>, _phantom: PhantomData, } @@ -62,11 +62,11 @@ impl BufferBindGroupState { /// accesses will be in a constant assending order. pub(crate) fn optimize(&mut self) { self.buffers - .sort_unstable_by_key(|&(id, _, _)| id.0.unzip().0); + .sort_unstable_by_key(|&(id, _, _)| id.unzip().0); } /// Returns a list of all buffers tracked. May contain duplicates. - pub fn used_ids(&self) -> impl Iterator> + '_ { + pub fn used_ids(&self) -> impl Iterator + '_ { self.buffers.iter().map(|&(id, _, _)| id) } @@ -84,7 +84,7 @@ impl BufferBindGroupState { ) -> Option<&'a Buffer> { let buffer = storage.get(id).ok()?; - self.buffers.push((Valid(id), buffer.clone(), state)); + self.buffers.push((id, buffer.clone(), state)); Some(buffer) } @@ -150,7 +150,7 @@ impl BufferUsageScope { bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { for &(id, ref resource, state) in &bind_group.buffers { - let index = id.0.unzip().0 as usize; + let index = id.unzip().0 as usize; unsafe { insert_or_merge( @@ -314,11 +314,11 @@ impl BufferTracker { /// the vectors will be extended. A call to set_size is not needed. 
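///
/// Call-site sketch, mirroring the buffer path in `device/global.rs` earlier in
/// this patch (the `fid` and `device` bindings are assumed from that context):
///
/// ```ignore
/// let (id, resource) = fid.assign(buffer);
/// device
///     .trackers
///     .lock()
///     .buffers
///     .insert_single(id, resource, hal::BufferUses::empty());
/// ```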
pub fn insert_single( &mut self, - id: Valid, + id: BufferId, resource: Arc>, state: BufferUses, ) { - let index = id.0.unzip().0 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -481,7 +481,7 @@ impl BufferTracker { pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, scope: &mut BufferUsageScope, - id_source: impl IntoIterator>, + id_source: impl IntoIterator, ) { let incoming_size = scope.state.len(); if incoming_size > self.start.len() { @@ -489,7 +489,7 @@ impl BufferTracker { } for id in id_source { - let (index32, _, _) = id.0.unzip(); + let (index32, _, _) = id.unzip(); let index = index32 as usize; scope.tracker_assert_in_bounds(index); @@ -539,8 +539,8 @@ impl BufferTracker { /// [`Device::trackers`]: crate::device::Device /// [`self.metadata`]: BufferTracker::metadata /// [`Hub::buffers`]: crate::hub::Hub::buffers - pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let index = id.0.unzip().0 as usize; + pub fn remove_abandoned(&mut self, id: BufferId) -> bool { + let index = id.unzip().0 as usize; if index > self.metadata.size() { return false; diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 4c429a4cd2..9d47aeccac 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -199,7 +199,7 @@ impl> ResourceMetadataProvider<'_, A, I, T /// - The index must be in bounds of the metadata tracker if this uses an indirect source. #[inline(always)] pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch { - unsafe { self.get_own(index).as_info().id().0.unzip().1 } + unsafe { self.get_own(index).as_info().id().unzip().1 } } } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index b2053a9df0..830834ffc8 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -8,7 +8,7 @@ use std::{marker::PhantomData, sync::Arc}; use crate::{ hal_api::HalApi, - id::{TypedId, Valid}, + id::TypedId, resource::Resource, storage::Storage, track::ResourceMetadata, @@ -17,7 +17,7 @@ use crate::{ /// Stores all the resources that a bind group stores. #[derive(Debug)] pub(crate) struct StatelessBindGroupSate> { - resources: Vec<(Valid, Arc)>, + resources: Vec<(Id, Arc)>, } impl> StatelessBindGroupSate { @@ -33,11 +33,11 @@ impl> StatelessBindGroupSate { /// accesses will be in a constant assending order. pub(crate) fn optimize(&mut self) { self.resources - .sort_unstable_by_key(|&(id, _)| id.0.unzip().0); + .sort_unstable_by_key(|&(id, _)| id.unzip().0); } /// Returns a list of all resources tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { + pub fn used(&self) -> impl Iterator + '_ { self.resources.iter().map(|&(id, _)| id) } @@ -52,7 +52,7 @@ impl> StatelessBindGroupSate { pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { let resource = storage.get(id).ok()?; - self.resources.push((Valid(id), resource.clone())); + self.resources.push((id, resource.clone())); Some(resource) } @@ -105,8 +105,8 @@ impl> StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
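///
/// Call-site sketch, mirroring the sampler path in `device/global.rs` earlier in
/// this patch; unlike the buffer/texture trackers, no usage state is recorded
/// (`fid` and `device` are assumed from that context):
///
/// ```ignore
/// let (id, resource) = fid.assign(sampler);
/// device.trackers.lock().samplers.insert_single(id, resource);
/// ```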
- pub fn insert_single(&mut self, id: Valid, resource: Arc) { - let (index32, _epoch, _) = id.0.unzip(); + pub fn insert_single(&mut self, id: Id, resource: Arc) { + let (index32, _epoch, _) = id.unzip(); let index = index32 as usize; self.allow_index(index); @@ -170,8 +170,8 @@ impl> StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. - pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let index = id.0.unzip().0 as usize; + pub fn remove_abandoned(&mut self, id: Id) -> bool { + let index = id.unzip().0 as usize; if index > self.metadata.size() { return false; diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 013b9dfb56..6666f74872 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -22,7 +22,7 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ hal_api::HalApi, - id::{TextureId, TypedId, Valid}, + id::{TextureId, TypedId}, resource::Texture, storage::Storage, track::{ @@ -150,7 +150,7 @@ impl ComplexTextureState { #[derive(Debug)] struct TextureBindGroupStateData { - id: Valid, + id: TextureId, selector: Option, texture: Arc>, usage: TextureUses, @@ -173,7 +173,7 @@ impl TextureBindGroupState { /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. pub(crate) fn optimize(&mut self) { - self.textures.sort_unstable_by_key(|v| v.id.0.unzip().0); + self.textures.sort_unstable_by_key(|v| v.id.unzip().0); } /// Returns a list of all textures tracked. May contain duplicates. @@ -192,7 +192,7 @@ impl TextureBindGroupState { let resource = storage.get(id).ok()?; self.textures.push(TextureBindGroupStateData { - id: Valid(id), + id, selector, texture: resource.clone(), usage: state, @@ -352,12 +352,12 @@ impl TextureUsageScope { pub unsafe fn merge_single( &mut self, storage: &Storage, TextureId>, - id: Valid, + id: TextureId, selector: Option, new_state: TextureUses, ) -> Result<(), UsageConflict> { - let index = id.0.unzip().0 as usize; - let resource = storage.get(id.0).unwrap(); + let index = id.unzip().0 as usize; + let resource = storage.get(id).unwrap(); self.tracker_assert_in_bounds(index); @@ -641,7 +641,7 @@ impl TextureTracker { } for t in bind_group_state.textures.iter() { - let index = t.id.0.unzip().0 as usize; + let index = t.id.unzip().0 as usize; scope.tracker_assert_in_bounds(index); if unsafe { !scope.metadata.contains_unchecked(index) } { @@ -674,8 +674,8 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. - pub fn remove(&mut self, id: Valid) -> bool { - let index = id.0.unzip().0 as usize; + pub fn remove(&mut self, id: TextureId) -> bool { + let index = id.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -702,8 +702,8 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. 
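///
/// Caller sketch (hedged: the actual caller is the suspected-resource triage in
/// `device/life.rs`, whose surrounding code is not part of this hunk):
///
/// ```ignore
/// let is_removed = device.trackers.lock().textures.remove_abandoned(texture_id);
/// if is_removed {
///     // No references remain outside the registry, so the texture can be freed.
/// }
/// ```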
- pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let index = id.0.unzip().0 as usize; + pub fn remove_abandoned(&mut self, id: TextureId) -> bool { + let index = id.unzip().0 as usize; if index > self.metadata.size() { return false; From da588ff10ce35eb84406968805639e979d1b8115 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 18:10:29 +0200 Subject: [PATCH 086/132] Keep resources in registry till user release them --- wgpu-core/src/binding_model.rs | 2 +- wgpu-core/src/command/bundle.rs | 5 +---- wgpu-core/src/device/global.rs | 7 +++---- wgpu-core/src/device/life.rs | 24 +++++++++++------------- wgpu-core/src/device/queue.rs | 9 +++++---- wgpu-core/src/instance.rs | 12 ++++++------ wgpu-core/src/present.rs | 8 +++----- wgpu-core/src/registry.rs | 5 +---- wgpu-core/src/track/buffer.rs | 7 +------ wgpu-core/src/track/stateless.rs | 9 ++------- 10 files changed, 34 insertions(+), 54 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 13004007c1..85bbe231a9 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -4,7 +4,7 @@ use crate::{ hal_api::HalApi, id::{ BindGroupId, BindGroupLayoutId, BufferId, PipelineLayoutId, SamplerId, TextureId, - TextureViewId, + TextureViewId, }, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, resource::{Resource, ResourceInfo}, diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 64e66ea6df..8beb9fec38 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -666,10 +666,7 @@ impl RenderBundleEncoder { }) } - fn check_valid_to_use( - &self, - device_id: id::DeviceId, - ) -> Result<(), RenderBundleErrorInner> { + fn check_valid_to_use(&self, device_id: id::DeviceId) -> Result<(), RenderBundleErrorInner> { if device_id != self.parent_id { return Err(RenderBundleErrorInner::NotValidToUse); } diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 7ef51165d3..59284eb914 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -321,8 +321,7 @@ impl Global { let hub = A::hub(self); let last_submission = { - let buffer_guard = hub.buffers.write(); - match buffer_guard.get(buffer_id) { + match hub.buffers.get(buffer_id) { Ok(buffer) => buffer.info.submission_index(), Err(_) => return Ok(()), } @@ -507,10 +506,10 @@ impl Global { let (last_submit_index, buffer) = { let mut buffer_guard = hub.buffers.write(); - match buffer_guard.get(buffer_id) { + match hub.buffers.get(buffer_id) { Ok(buffer) => { let last_submit_index = buffer.info.submission_index(); - (last_submit_index, buffer.clone()) + (last_submit_index, buffer) } Err(_) => { hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 0cfdc4d533..cc9c6b9a02 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -361,9 +361,7 @@ impl LifetimeTracker { pub fn post_submit(&mut self) { for v in self.future_suspected_buffers.drain(..).take(1) { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v); + self.suspected_resources.buffers.insert(v.as_info().id(), v); } for v in self.future_suspected_textures.drain(..).take(1) { self.suspected_resources @@ -490,7 +488,7 @@ impl LifetimeTracker { log::info!("Bundle {:?} is removed from registry", id); f(bundle_id); - if let Some(res) = hub.render_bundles.unregister(id) { + if let Ok(res) = hub.render_bundles.get(id) { for v in 
res.used.buffers.used_resources() { self.suspected_resources .buffers @@ -545,7 +543,7 @@ impl LifetimeTracker { log::info!("BindGroup {:?} is removed from registry", id); f(bind_group_id); - if let Some(res) = hub.bind_groups.unregister(id) { + if let Ok(res) = hub.bind_groups.get(id) { for v in res.used.buffers.used_resources() { self.suspected_resources .buffers @@ -607,7 +605,7 @@ impl LifetimeTracker { log::info!("TextureView {:?} is removed from registry", id); f(view_id); - if let Some(res) = hub.texture_views.unregister(id) { + if let Ok(res) = hub.texture_views.get(id) { if let Some(parent_texture) = res.parent.as_ref() { self.suspected_resources .textures @@ -649,7 +647,7 @@ impl LifetimeTracker { log::info!("Texture {:?} is removed from registry", id); f(texture_id); - if let Some(res) = hub.textures.unregister(id) { + if let Ok(res) = hub.textures.get(id) { let submit_index = res.info.submission_index(); let non_referenced_resources = self .active @@ -695,7 +693,7 @@ impl LifetimeTracker { log::info!("Sampler {:?} is removed from registry", id); f(sampler_id); - if let Some(res) = hub.samplers.unregister(id) { + if let Ok(res) = hub.samplers.get(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -732,7 +730,7 @@ impl LifetimeTracker { log::info!("Buffer {:?} is removed from registry", id); f(buffer_id); - if let Some(res) = hub.buffers.unregister(id) { + if let Ok(res) = hub.buffers.get(id) { let submit_index = res.info.submission_index(); if let resource::BufferMapState::Init { ref stage_buffer, .. @@ -774,7 +772,7 @@ impl LifetimeTracker { log::info!("ComputePipeline {:?} is removed from registry", id); f(compute_pipeline_id); - if let Some(res) = hub.compute_pipelines.unregister(id) { + if let Ok(res) = hub.compute_pipelines.get(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -812,7 +810,7 @@ impl LifetimeTracker { log::info!("RenderPipeline {:?} is removed from registry", id); f(render_pipeline_id); - if let Some(res) = hub.render_pipelines.unregister(id) { + if let Ok(res) = hub.render_pipelines.get(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -921,7 +919,7 @@ impl LifetimeTracker { log::info!("QuerySet {:?} is removed from registry", id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); - if let Some(res) = hub.query_sets.unregister(id) { + if let Ok(res) = hub.query_sets.get(id) { let submit_index = res.info.submission_index(); self.active .iter_mut() @@ -1099,7 +1097,7 @@ impl LifetimeTracker { if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::info!("Buffer {:?} is removed from registry", buffer_id); - if let Some(buf) = hub.buffers.unregister(buffer_id) { + if let Ok(buf) = hub.buffers.get(buffer_id) { self.free_resources.buffers.push(buf); } } else { diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 59487cb57c..57ef8fd993 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1260,10 +1260,11 @@ impl Global { for texture_view in cmd_buf_trackers.views.used_resources() { texture_view.info.use_at(submit_index); if texture_view.is_unique() { - device.temp_suspected.lock().texture_views.insert( - texture_view.as_info().id(), - texture_view.clone(), - ); + device + .temp_suspected + .lock() + .texture_views + .insert(texture_view.as_info().id(), texture_view.clone()); } } { diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs 
index 6eea6d63b6..3c3eb8a3fa 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -994,17 +994,17 @@ impl Global { let fid = A::hub(self).adapters.prepare(input); - let id = match A::VARIANT { + let (id, _) = match A::VARIANT { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)) .0, + Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)), #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => fid.assign(Adapter::new(hal_adapter)).0 .0, + Backend::Metal => fid.assign(Adapter::new(hal_adapter)), #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)) .0, + Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)), #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)) .0, + Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)), #[cfg(feature = "gles")] - Backend::Gl => fid.assign(Adapter::new(hal_adapter)) .0, + Backend::Gl => fid.assign(Adapter::new(hal_adapter)), _ => unreachable!(), }; log::info!("Created Adapter {:?}", id); diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index b217bd05a3..b768797a37 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -217,11 +217,9 @@ impl Global { { // register it in the device tracker as uninitialized let mut trackers = device.trackers.lock(); - trackers.textures.insert_single( - id, - resource, - hal::TextureUses::UNINITIALIZED, - ); + trackers + .textures + .insert_single(id, resource, hal::TextureUses::UNINITIALIZED); } if present.acquired_texture.is_some() { diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 0d2db72d67..627ef0def9 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -61,10 +61,7 @@ impl> FutureId<'_, I, T> { pub fn assign(self, mut value: T) -> (I, Arc) { value.as_info_mut().set_id(self.id); self.data.write().insert(self.id, Arc::new(value)); - ( - self.id, - self.data.read().get(self.id).unwrap().clone(), - ) + (self.id, self.data.read().get(self.id).unwrap().clone()) } pub fn assign_error(self, label: &str) -> I { diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index edffa0ed7b..32e5dce28b 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -312,12 +312,7 @@ impl BufferTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single( - &mut self, - id: BufferId, - resource: Arc>, - state: BufferUses, - ) { + pub fn insert_single(&mut self, id: BufferId, resource: Arc>, state: BufferUses) { let index = id.unzip().0 as usize; self.allow_index(index); diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 830834ffc8..08d05c4703 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -7,11 +7,7 @@ use std::{marker::PhantomData, sync::Arc}; use crate::{ - hal_api::HalApi, - id::TypedId, - resource::Resource, - storage::Storage, - track::ResourceMetadata, + hal_api::HalApi, id::TypedId, resource::Resource, storage::Storage, track::ResourceMetadata, }; /// Stores all the resources that a bind group stores. @@ -32,8 +28,7 @@ impl> StatelessBindGroupSate { /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. 
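///
/// (Sketch of the assumed call order during bind-group creation: entries are
/// recorded with `add_single`, then sorted once here before the state is merged
/// into a device tracker.)
///
/// ```ignore
/// bind_group_state.add_single(&*sampler_guard, sampler_id);
/// bind_group_state.optimize();
/// ```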
pub(crate) fn optimize(&mut self) { - self.resources - .sort_unstable_by_key(|&(id, _)| id.unzip().0); + self.resources.sort_unstable_by_key(|&(id, _)| id.unzip().0); } /// Returns a list of all resources tracked. May contain duplicates. From 1765710db0a5bfb989d9d6aa97b37708a0295c81 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 18:29:28 +0200 Subject: [PATCH 087/132] First anysurface will win --- wgpu-core/src/instance.rs | 75 ++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 3c3eb8a3fa..c8b32aef86 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -467,51 +467,62 @@ impl Global { profiling::scope!("Instance::create_surface"); fn init( + any_surface: &mut Option, inst: &Option, display_handle: raw_window_handle::RawDisplayHandle, window_handle: raw_window_handle::RawWindowHandle, - ) -> Option> { - inst.as_ref().and_then(|inst| unsafe { - match inst.create_surface(display_handle, window_handle) { - Ok(raw) => Some(HalSurface { raw: Arc::new(raw) }), - Err(e) => { - log::warn!("Error: {:?}", e); - None + ) { + if any_surface.is_none() { + if let Some(surface) = inst.as_ref().and_then(|inst| unsafe { + match inst.create_surface(display_handle, window_handle) { + Ok(raw) => Some(HalSurface:: { raw: Arc::new(raw) }), + Err(e) => { + log::warn!("Error: {:?}", e); + None + } } + }) { + *any_surface = Some(AnySurface::new(surface)); } - }) + } } let mut hal_surface = None; #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - if let Some(raw) = - init::(&self.instance.vulkan, display_handle, window_handle) - { - hal_surface = Some(AnySurface::new(raw)); - } + init::( + &mut hal_surface, + &self.instance.vulkan, + display_handle, + window_handle, + ); #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - if let Some(raw) = - init::(&self.instance.metal, display_handle, window_handle) - { - hal_surface = Some(AnySurface::new(raw)); - } + init::( + &mut hal_surface, + &self.instance.metal, + display_handle, + window_handle, + ); #[cfg(all(feature = "dx12", windows))] - if let Some(raw) = - init::(&self.instance.dx12, display_handle, window_handle) - { - hal_surface = Some(AnySurface::new(raw)); - } + init::( + &mut hal_surface, + &self.instance.dx12, + display_handle, + window_handle, + ); #[cfg(all(feature = "dx11", windows))] - if let Some(raw) = - init::(&self.instance.dx11, display_handle, window_handle) - { - hal_surface = Some(AnySurface::new(raw)); - } + init::( + &mut hal_surface, + &self.instance.dx11, + display_handle, + window_handle, + ); #[cfg(feature = "gles")] - if let Some(raw) = init::(&self.instance.gl, display_handle, window_handle) - { - hal_surface = Some(AnySurface::new(raw)); - } + init::( + &mut hal_surface, + &self.instance.gl, + display_handle, + window_handle, + ); let surface = Surface { presentation: Mutex::new(None), From c4f150d9ea3d1705c6773d088996f055835c3143 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 18:34:27 +0200 Subject: [PATCH 088/132] Fixing locking --- wgpu-core/src/device/global.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 59284eb914..7ef51165d3 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -321,7 +321,8 @@ impl Global { let hub = A::hub(self); let last_submission = { - match hub.buffers.get(buffer_id) { + let buffer_guard 
= hub.buffers.write(); + match buffer_guard.get(buffer_id) { Ok(buffer) => buffer.info.submission_index(), Err(_) => return Ok(()), } @@ -506,10 +507,10 @@ impl Global { let (last_submit_index, buffer) = { let mut buffer_guard = hub.buffers.write(); - match hub.buffers.get(buffer_id) { + match buffer_guard.get(buffer_id) { Ok(buffer) => { let last_submit_index = buffer.info.submission_index(); - (last_submit_index, buffer) + (last_submit_index, buffer.clone()) } Err(_) => { hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); From 4f1bcabf6e8e1bb3f0567a47f66f2ea7cc9a0d38 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 26 Aug 2023 19:09:11 +0200 Subject: [PATCH 089/132] Fix compilation error --- wgpu-core/src/device/queue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 57ef8fd993..cb9d1fb85e 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1050,7 +1050,7 @@ impl Global { let mut trackers = device.trackers.lock(); crate::command::clear_texture( &*texture_guard, - id::Valid(destination.texture), + destination.texture, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, From 969e411e17b7928238d7d0cb62a4e909612e8020 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 27 Aug 2023 10:53:17 +0200 Subject: [PATCH 090/132] Fixing wasm32 --- wgpu-core/src/instance.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index c8b32aef86..f2ffa74a75 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -590,11 +590,13 @@ impl Global { .gl .as_ref() .map(|inst| { + let raw_surface = inst.create_surface_from_canvas(canvas)?; Ok(HalSurface { - raw: Arc::new(inst.create_surface_from_canvas(canvas)?), + raw: Arc::new(raw_surface), }) }) - .transpose()?; + .transpose()? + .unwrap(); AnySurface::new(hal_surface) }, }; @@ -624,11 +626,13 @@ impl Global { .gl .as_ref() .map(|inst| { + let raw_surface = inst.create_surface_from_offscreen_canvas(canvas)?; Ok(HalSurface { - raw: Arc::new(inst.create_surface_from_offscreen_canvas(canvas)?), + raw: Arc::new(raw_surface), }) }) - .transpose()?; + .transpose()? 
+ .unwrap(); AnySurface::new(hal_surface) }, }; From 27d5a683447fd4097f956599b71d16d91fa8e009 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 27 Aug 2023 11:45:02 +0200 Subject: [PATCH 091/132] Fix wasm compilation --- wgpu-core/src/instance.rs | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index f2ffa74a75..91c85727bc 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -1009,19 +1009,20 @@ impl Global { let fid = A::hub(self).adapters.prepare(input); - let (id, _) = match A::VARIANT { - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)), - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => fid.assign(Adapter::new(hal_adapter)), - #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)), - #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)), - #[cfg(feature = "gles")] - Backend::Gl => fid.assign(Adapter::new(hal_adapter)), - _ => unreachable!(), - }; + let (id, _adapter): (crate::id::Id>, Arc>) = + match A::VARIANT { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + Backend::Metal => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)), + #[cfg(feature = "gles")] + Backend::Gl => fid.assign(Adapter::new(hal_adapter)), + _ => unreachable!(), + }; log::info!("Created Adapter {:?}", id); id } From 7394fd793de703280509a1c177874c5fcb0e540e Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 8 Sep 2023 23:59:35 +0200 Subject: [PATCH 092/132] Storage is now only for user - wgpu use arcs --- player/src/bin/play.rs | 2 +- player/src/lib.rs | 40 +- player/tests/test.rs | 2 +- tests/src/lib.rs | 8 +- tests/tests/mem_leaks.rs | 249 ++++++++++ tests/tests/root.rs | 1 + wgpu-core/src/binding_model.rs | 4 +- wgpu-core/src/command/bundle.rs | 70 +-- wgpu-core/src/command/clear.rs | 34 +- wgpu-core/src/command/compute.rs | 56 +-- wgpu-core/src/command/memory_init.rs | 120 +++-- wgpu-core/src/command/mod.rs | 58 +-- wgpu-core/src/command/query.rs | 7 +- wgpu-core/src/command/render.rs | 165 +++---- wgpu-core/src/command/transfer.rs | 75 ++- wgpu-core/src/device/global.rs | 644 ++++++++------------------ wgpu-core/src/device/life.rs | 492 ++++++++++---------- wgpu-core/src/device/mod.rs | 6 +- wgpu-core/src/device/queue.rs | 67 +-- wgpu-core/src/device/resource.rs | 210 ++++----- wgpu-core/src/global.rs | 45 +- wgpu-core/src/hal_api.rs | 14 +- wgpu-core/src/hub.rs | 94 ++-- wgpu-core/src/id.rs | 17 +- wgpu-core/src/identity.rs | 106 +++-- wgpu-core/src/init_tracker/buffer.rs | 28 +- wgpu-core/src/init_tracker/texture.rs | 16 +- wgpu-core/src/instance.rs | 32 +- wgpu-core/src/present.rs | 2 +- wgpu-core/src/registry.rs | 91 +++- wgpu-core/src/resource.rs | 181 +++++++- wgpu-core/src/storage.rs | 80 +--- wgpu-core/src/track/buffer.rs | 88 ++-- wgpu-core/src/track/mod.rs | 22 +- wgpu-core/src/track/stateless.rs | 33 +- wgpu-core/src/track/texture.rs | 97 ++-- 36 files changed, 1605 insertions(+), 1651 deletions(-) create mode 100644 tests/tests/mem_leaks.rs diff 
--git a/player/src/bin/play.rs b/player/src/bin/play.rs index ce0b4c3bd4..0429b4bcb6 100644 --- a/player/src/bin/play.rs +++ b/player/src/bin/play.rs @@ -49,7 +49,7 @@ fn main() { IdentityPassThroughFactory, wgt::InstanceDescriptor::default(), ); - let mut command_buffer_id_manager = wgc::identity::IdentityManager::default(); + let mut command_buffer_id_manager = wgc::identity::IdentityManager::new(); #[cfg(feature = "winit")] let surface = global.instance_create_surface( diff --git a/player/src/lib.rs b/player/src/lib.rs index fa6ec72317..340f6e2be6 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -8,39 +8,23 @@ #![cfg(not(target_arch = "wasm32"))] #![warn(unsafe_op_in_unsafe_fn)] -use wgc::device::trace; +use wgc::{device::trace, identity::IdentityManager}; -use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path}; +use std::{borrow::Cow, fs, path::Path, sync::Arc}; -#[derive(Debug)] -pub struct IdentityPassThrough(PhantomData); +pub struct IdentityPassThroughFactory; -impl wgc::identity::IdentityHandler - for IdentityPassThrough -{ +impl wgc::identity::IdentityHandlerFactory for IdentityPassThroughFactory { type Input = I; - fn process(&self, id: I, backend: wgt::Backend) -> I { - let (index, epoch, _backend) = id.unzip(); - I::zip(index, epoch, backend) + fn spawn(&self) -> Option>> { + None } - fn free(&self, _id: I) {} -} - -pub struct IdentityPassThroughFactory; -impl wgc::identity::IdentityHandlerFactory - for IdentityPassThroughFactory -{ - type Filter = IdentityPassThrough; - fn spawn(&self) -> Self::Filter { - IdentityPassThrough(PhantomData) - } -} -impl wgc::identity::GlobalIdentityHandlerFactory for IdentityPassThroughFactory { - fn ids_are_generated_in_wgpu() -> bool { - false + fn input_to_id(id_in: Self::Input) -> I { + id_in } } +impl wgc::identity::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} pub trait GlobalPlay { fn encode_commands( @@ -53,7 +37,7 @@ pub trait GlobalPlay { device: wgc::id::DeviceId, action: trace::Action, dir: &Path, - comb_manager: &mut wgc::identity::IdentityManager, + comb_manager: &mut wgc::identity::IdentityManager, ); } @@ -168,7 +152,7 @@ impl GlobalPlay for wgc::global::Global { device: wgc::id::DeviceId, action: trace::Action, dir: &Path, - comb_manager: &mut wgc::identity::IdentityManager, + comb_manager: &mut wgc::identity::IdentityManager, ) { use wgc::device::trace::Action; log::debug!("action {:?}", action); @@ -390,7 +374,7 @@ impl GlobalPlay for wgc::global::Global { let (encoder, error) = self.device_create_command_encoder::( device, &wgt::CommandEncoderDescriptor { label: None }, - comb_manager.alloc(device.backend()), + comb_manager.process(device.backend()), ); if let Some(e) = error { panic!("{:?}", e); diff --git a/player/tests/test.rs b/player/tests/test.rs index 2cfa030101..e3a2a6a796 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -100,7 +100,7 @@ impl Test<'_> { panic!("{:?}", e); } - let mut command_buffer_id_manager = wgc::identity::IdentityManager::default(); + let mut command_buffer_id_manager = wgc::identity::IdentityManager::new(); println!("\t\t\tRunning..."); for action in self.actions { wgc::gfx_select!(device_id => global.process(device_id, action, dir, &mut command_buffer_id_manager)); diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 51b067fc26..91f531b76a 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -39,6 +39,7 @@ async fn initialize_device( } pub struct TestingContext { + pub instance: Instance, pub adapter: Adapter, pub adapter_info: 
wgt::AdapterInfo, pub adapter_downlevel_capabilities: wgt::DownlevelCapabilities, @@ -210,7 +211,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te let _test_guard = isolation::OneTestPerProcessGuard::new(); - let (adapter, _surface_guard) = initialize_adapter(); + let (instance, adapter, _surface_guard) = initialize_adapter(); let adapter_info = adapter.get_info(); let adapter_lowercase_name = adapter_info.name.to_lowercase(); @@ -256,6 +257,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te )); let context = TestingContext { + instance, adapter, adapter_info: adapter_info.clone(), adapter_downlevel_capabilities, @@ -375,7 +377,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te } } -fn initialize_adapter() -> (Adapter, SurfaceGuard) { +fn initialize_adapter() -> (Instance, Adapter, SurfaceGuard) { let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(Backends::all); let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(); let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default(); @@ -440,7 +442,7 @@ fn initialize_adapter() -> (Adapter, SurfaceGuard) { )) .expect("could not find suitable adapter on the system"); - (adapter, surface_guard) + (instance, adapter, surface_guard) } struct SurfaceGuard { diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs new file mode 100644 index 0000000000..2db005c4e0 --- /dev/null +++ b/tests/tests/mem_leaks.rs @@ -0,0 +1,249 @@ +use std::num::NonZeroU64; + +use wasm_bindgen_test::*; +use wgpu::util::DeviceExt; + +use wgpu_test::{initialize_test, TestParameters, TestingContext}; + +#[cfg(any( + not(target_arch = "wasm32"), + target_os = "emscripten", + feature = "webgl" +))] +fn draw_test_with_reports( + ctx: TestingContext, + expected: &[u32], + function: impl FnOnce(&mut wgpu::RenderPass<'_>), +) { + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.adapters.num_allocated, 1); + assert_eq!(report.devices.num_allocated, 1); + assert_eq!(report.queues.num_allocated, 1); + + let shader = ctx + .device + .create_shader_module(wgpu::include_wgsl!("./vertex_indices/draw.vert.wgsl")); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.shader_modules.num_allocated, 1); + + let bgl = ctx + .device + .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: None, + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Storage { read_only: false }, + has_dynamic_offset: false, + min_binding_size: NonZeroU64::new(4), + }, + visibility: wgpu::ShaderStages::VERTEX, + count: None, + }], + }); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 0); + assert_eq!(report.bind_groups.num_allocated, 0); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + + let buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size: 4 * expected.len() as u64, + usage: wgpu::BufferUsages::COPY_SRC + | wgpu::BufferUsages::STORAGE + | wgpu::BufferUsages::MAP_READ, + mapped_at_creation: false, + }); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + + let bg = 
ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: None, + layout: &bgl, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: buffer.as_entire_binding(), + }], + }); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + + let ppl = ctx + .device + .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { + label: None, + bind_group_layouts: &[&bgl], + push_constant_ranges: &[], + }); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.render_pipelines.num_allocated, 0); + assert_eq!(report.compute_pipelines.num_allocated, 0); + + let pipeline = ctx + .device + .create_render_pipeline(&wgpu::RenderPipelineDescriptor { + label: None, + layout: Some(&ppl), + vertex: wgpu::VertexState { + buffers: &[], + entry_point: "vs_main", + module: &shader, + }, + primitive: wgpu::PrimitiveState::default(), + depth_stencil: None, + multisample: wgpu::MultisampleState::default(), + fragment: Some(wgpu::FragmentState { + entry_point: "fs_main", + module: &shader, + targets: &[Some(wgpu::ColorTargetState { + format: wgpu::TextureFormat::Rgba8Unorm, + blend: None, + write_mask: wgpu::ColorWrites::ALL, + })], + }), + multiview: None, + }); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.render_pipelines.num_allocated, 1); + assert_eq!(report.compute_pipelines.num_allocated, 0); + + let texture = ctx.device.create_texture_with_data( + &ctx.queue, + &wgpu::TextureDescriptor { + label: Some("dummy"), + size: wgpu::Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba8Unorm, + usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_DST, + view_formats: &[], + }, + &[0, 0, 0, 1], + ); + let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default()); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.textures.num_allocated, 1); + + drop(texture); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.textures.num_allocated, 1); + assert_eq!(report.textures.num_kept_from_user, 0); + + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.command_buffers.num_allocated, 1); + assert_eq!(report.buffers.num_allocated, 1); + + let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: None, + color_attachments: &[Some(wgpu::RenderPassColorAttachment { + ops: wgpu::Operations::default(), + resolve_target: None, + view: 
&texture_view, + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + + rpass.set_pipeline(&pipeline); + rpass.set_bind_group(0, &bg, &[]); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.render_pipelines.num_allocated, 1); + assert_eq!(report.compute_pipelines.num_allocated, 0); + assert_eq!(report.command_buffers.num_allocated, 1); + assert_eq!(report.render_bundles.num_allocated, 0); + assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.textures.num_allocated, 1); + + function(&mut rpass); + + drop(rpass); + drop(pipeline); + drop(texture_view); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.command_buffers.num_allocated, 1); + assert_eq!(report.render_pipelines.num_allocated, 1); + assert_eq!(report.render_pipelines.num_kept_from_user, 0); + assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.texture_views.num_kept_from_user, 0); + assert_eq!(report.textures.num_allocated, 1); + assert_eq!(report.textures.num_kept_from_user, 0); + + ctx.queue.submit(Some(encoder.finish())); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.command_buffers.num_allocated, 0); + + ctx.device.poll(wgpu::Maintain::Wait); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.render_pipelines.num_allocated, 0); + assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 0); +} + +#[test] +#[wasm_bindgen_test] +#[cfg(any( + not(target_arch = "wasm32"), + target_os = "emscripten", + feature = "webgl" +))] +fn simple_draw_leaks() { + initialize_test(TestParameters::default().test_features_limits(), |ctx| { + draw_test_with_reports(ctx, &[0, 1, 2, 3, 4, 5], |cmb| { + cmb.draw(0..6, 0..1); + }) + }) +} diff --git a/tests/tests/root.rs b/tests/tests/root.rs index b376ab4981..fdab9ad27b 100644 --- a/tests/tests/root.rs +++ b/tests/tests/root.rs @@ -15,6 +15,7 @@ mod encoder; mod example_wgsl; mod external_texture; mod instance; +mod mem_leaks; mod occlusion_query; mod partially_bounded_arrays; mod poll; diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 85bbe231a9..e4d389703e 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -847,8 +847,8 @@ pub struct BindGroup { pub(crate) layout: Arc>, pub(crate) info: ResourceInfo, pub(crate) used: BindGroupStates, - pub(crate) used_buffer_ranges: Vec, - pub(crate) used_texture_ranges: Vec, + pub(crate) used_buffer_ranges: Vec>, + pub(crate) used_texture_ranges: Vec>, pub(crate) dynamic_binding_info: Vec, /// Actual binding sizes for buffers that don't have `min_binding_size` /// specified in BGL. Listed in the order of iteration of `BGL.entries`. 
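The binding_model.rs hunk above shows the pattern the rest of this patch repeats: instead of storing a resource id and re-resolving it through a `Storage` read guard at use time, internal structures such as the init-tracker actions now hold the resource itself behind an `Arc`, so the lookup (and its "resource no longer exists" failure path) disappears. Below is a minimal, self-contained sketch of that ownership change, using stand-in types invented purely for illustration; it is not the actual wgpu-core code, whose structs carry far more state.

use std::sync::Arc;

// Stand-in for a wgpu-core resource; the real Texture/Buffer types hold
// device references, usage flags, an initialization tracker, and so on.
struct Texture {
    label: String,
}

// Before arcanization: an action recorded only an id, and every consumer
// had to go back through a Storage guard to find the texture (or discover
// that it had already been dropped).
struct InitActionById {
    texture_id: usize,
}

// After arcanization: the action owns an Arc to the texture, so the
// resource is reachable directly and stays alive for as long as any
// recorded action still refers to it.
struct InitActionByArc {
    texture: Arc<Texture>,
}

fn main() {
    let texture = Arc::new(Texture {
        label: String::from("render target"),
    });

    // Recording clones the Arc (a refcount bump) instead of copying an id.
    let action = InitActionByArc {
        texture: Arc::clone(&texture),
    };

    // Using the action needs no registry lock and cannot fail because the
    // texture was dropped in the meantime.
    println!("init action targets {:?}", action.texture.label);

    // The id-based form is kept only for contrast with the old approach.
    let _old_style = InitActionById { texture_id: 7 };
}

The same trade-off runs through the following hunks: tracker calls such as `set_single` and `merge_single` take a reference to the resource instead of a `(storage_guard, id)` pair, and the now-redundant `Storage` parameters drop out of the function signatures.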
diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 8beb9fec38..0da78e04bd 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -93,12 +93,9 @@ use crate::{ hal_api::HalApi, hub::Hub, id::{self, RenderBundleId}, - identity::GlobalIdentityHandlerFactory, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{self, PipelineFlags, RenderPipeline}, - resource, resource::{Resource, ResourceInfo}, - storage::Storage, track::RenderBundleScope, validation::check_buffer_usage, Label, LabelHelpers, @@ -253,11 +250,11 @@ impl RenderBundleEncoder { /// and accumulate buffer and texture initialization actions. /// /// [`ExecuteBundle`]: RenderCommand::ExecuteBundle - pub(crate) fn finish( + pub(crate) fn finish( self, desc: &RenderBundleDescriptor, device: &Arc>, - hub: &Hub, + hub: &Hub, ) -> Result, RenderBundleError> { let bind_group_guard = hub.bind_groups.read(); let pipeline_guard = hub.render_pipelines.read(); @@ -351,7 +348,7 @@ impl RenderBundleEncoder { unsafe { state .trackers - .merge_bind_group(&*texture_guard, &bind_group.used) + .merge_bind_group(&bind_group.used) .map_pass_err(scope)? }; //Note: stateless trackers are not merged: the lifetime reference @@ -402,7 +399,7 @@ impl RenderBundleEncoder { size, } => { let scope = PassErrorScope::SetIndexBuffer(buffer_id); - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) @@ -417,7 +414,7 @@ impl RenderBundleEncoder { None => buffer.size, }; buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, )); @@ -430,7 +427,7 @@ impl RenderBundleEncoder { size, } => { let scope = PassErrorScope::SetVertexBuffer(buffer_id); - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) @@ -445,7 +442,7 @@ impl RenderBundleEncoder { None => buffer.size, }; buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, )); @@ -565,7 +562,7 @@ impl RenderBundleEncoder { let pipeline = state.pipeline(scope)?; let used_bind_groups = pipeline.used_bind_groups; - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -576,7 +573,7 @@ impl RenderBundleEncoder { .map_pass_err(scope)?; buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, )); @@ -603,7 +600,7 @@ impl RenderBundleEncoder { let pipeline = state.pipeline(scope)?; let used_bind_groups = pipeline.used_bind_groups; - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -614,7 +611,7 @@ impl RenderBundleEncoder { .map_pass_err(scope)?; buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, )); @@ -735,8 +732,8 @@ pub struct RenderBundle { pub(super) is_stencil_read_only: bool, pub(crate) device: Arc>, pub(crate) used: RenderBundleScope, - pub(super) 
buffer_memory_init_actions: Vec, - pub(super) texture_memory_init_actions: Vec, + pub(super) buffer_memory_init_actions: Vec>, + pub(super) texture_memory_init_actions: Vec>, pub(super) context: RenderPassContext, pub(crate) info: ResourceInfo, } @@ -768,13 +765,8 @@ impl RenderBundle { /// Note that the function isn't expected to fail, generally. /// All the validation has already been done by this point. /// The only failure condition is if some of the used buffers are destroyed. - pub(super) unsafe fn execute( - &self, - raw: &mut A::CommandEncoder, - bind_group_guard: &Storage, id::BindGroupId>, - pipeline_guard: &Storage, id::RenderPipelineId>, - buffer_guard: &Storage, id::BufferId>, - ) -> Result<(), ExecutionError> { + pub(super) unsafe fn execute(&self, raw: &mut A::CommandEncoder) -> Result<(), ExecutionError> { + let trackers = &self.used; let mut offsets = self.base.dynamic_offsets.as_slice(); let mut pipeline_layout = None::>>; if let Some(ref label) = self.base.label { @@ -788,7 +780,7 @@ impl RenderBundle { num_dynamic_offsets, bind_group_id, } => { - let bind_group = bind_group_guard.get(bind_group_id).unwrap(); + let bind_group = trackers.bind_groups.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( pipeline_layout.as_ref().unwrap().raw(), @@ -800,7 +792,7 @@ impl RenderBundle { offsets = &offsets[num_dynamic_offsets as usize..]; } RenderCommand::SetPipeline(pipeline_id) => { - let pipeline = pipeline_guard.get(pipeline_id).unwrap(); + let pipeline = trackers.render_pipelines.get(pipeline_id).unwrap(); unsafe { raw.set_render_pipeline(pipeline.raw()) }; pipeline_layout = Some(pipeline.layout.clone()); @@ -811,12 +803,7 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -830,12 +817,7 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -913,12 +895,7 @@ impl RenderBundle { count: None, indexed: false, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { @@ -927,12 +904,7 @@ impl RenderBundle { count: None, indexed: true, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indexed_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { .. 
} diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 693882a6a9..26221d19f3 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -1,4 +1,4 @@ -use std::ops::Range; +use std::{ops::Range, sync::Arc}; #[cfg(feature = "trace")] use crate::device::trace::Command as TraceCommand; @@ -10,8 +10,7 @@ use crate::{ id::{BufferId, CommandEncoderId, DeviceId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange}, - resource::{Texture, TextureClearMode}, - storage::Storage, + resource::{Resource, Texture, TextureClearMode}, track::{TextureSelector, TextureTracker}, }; @@ -94,10 +93,13 @@ impl Global { let (dst_buffer, dst_pending) = { let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(dst) + .map_err(|_| ClearError::InvalidBuffer(dst))?; cmd_buf_data .trackers .buffers - .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST) + .set_single(dst_buffer, hal::BufferUses::COPY_DST) .ok_or(ClearError::InvalidBuffer(dst))? }; let dst_raw = dst_buffer @@ -138,7 +140,7 @@ impl Global { // Mark dest as initialized. cmd_buf_data.buffer_memory_init_actions.extend( dst_buffer.initialization_status.read().create_action( - dst, + &dst_buffer, offset..end, MemoryInitKind::ImplicitlyInitialized, ), @@ -221,10 +223,9 @@ impl Global { let device = &cmd_buf.device; let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(); - let texture_guard = hub.textures.read(); + clear_texture( - &*texture_guard, - dst, + &dst_texture, TextureInitRange { mip_range: subresource_mip_range, layer_range: subresource_layer_range, @@ -238,22 +239,19 @@ impl Global { } pub(crate) fn clear_texture( - storage: &Storage, TextureId>, - dst_texture_id: TextureId, + dst_texture: &Arc>, range: TextureInitRange, encoder: &mut A::CommandEncoder, texture_tracker: &mut TextureTracker, alignments: &hal::Alignments, zero_buffer: &A::Buffer, ) -> Result<(), ClearError> { - let dst_texture = &storage[dst_texture_id]; - let dst_raw = dst_texture .inner .as_ref() .unwrap() .as_raw() - .ok_or(ClearError::InvalidTexture(dst_texture_id))?; + .ok_or_else(|| ClearError::InvalidTexture(dst_texture.as_info().id()))?; // Issue the right barrier. let clear_usage = match *dst_texture.clear_mode.read() { @@ -265,7 +263,9 @@ pub(crate) fn clear_texture( hal::TextureUses::COLOR_TARGET } TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id)); + return Err(ClearError::NoValidTextureClearMode( + dst_texture.as_info().id(), + )); } }; @@ -288,7 +288,7 @@ pub(crate) fn clear_texture( // clear_texture api in order to remove this check and call the cheaper // change_replace_tracked whenever possible. let dst_barrier = texture_tracker - .set_single(dst_texture, dst_texture_id, selector, clear_usage) + .set_single(dst_texture, selector, clear_usage) .unwrap() .map(|pending| pending.into_hal(dst_texture)); unsafe { @@ -312,7 +312,9 @@ pub(crate) fn clear_texture( clear_texture_via_render_passes(dst_texture, range, is_color, encoder)? 
} TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id)); + return Err(ClearError::NoValidTextureClearMode( + dst_texture.as_info().id(), + )); } } Ok(()) diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 96fb09daf7..2c8d8a5146 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -18,7 +18,7 @@ use crate::{ identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, pipeline, - resource::{self, Buffer, Texture}, + resource::{self}, storage::Storage, track::{Tracker, UsageConflict, UsageScope}, validation::{check_buffer_usage, MissingBufferUsageError}, @@ -306,15 +306,10 @@ impl State { raw_encoder: &mut A::CommandEncoder, base_trackers: &mut Tracker, bind_group_guard: &Storage, id::BindGroupId>, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, indirect_buffer: Option, ) -> Result<(), UsageConflict> { for id in self.binder.list_active() { - unsafe { - self.scope - .merge_bind_group(texture_guard, &bind_group_guard[id].used)? - }; + unsafe { self.scope.merge_bind_group(&bind_group_guard[id].used)? }; // Note: stateless trackers are not merged: the lifetime reference // is held to the bind group itself. } @@ -322,7 +317,6 @@ impl State { for id in self.binder.list_active() { unsafe { base_trackers.set_and_remove_from_usage_scope_sparse( - texture_guard, &mut self.scope, &bind_group_guard[id].used, ) @@ -338,7 +332,7 @@ impl State { log::trace!("Encoding dispatch barriers"); - CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard); + CommandBuffer::drain_barriers(raw_encoder, base_trackers); Ok(()) } } @@ -511,20 +505,18 @@ impl Global { .map_pass_err(scope)?; buffer_memory_init_actions.extend( - bind_group.used_buffer_ranges.iter().filter_map( - |action| match buffer_guard.get(action.id) { - Ok(buffer) => { - buffer.initialization_status.read().check_action(action) - } - Err(_) => None, - }, - ), + bind_group.used_buffer_ranges.iter().filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) + }), ); for action in bind_group.used_texture_ranges.iter() { - pending_discard_init_fixups.extend( - texture_memory_actions.register_init_action(action, &texture_guard), - ); + pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } let pipeline_layout = &state.binder.pipeline_layout; @@ -662,14 +654,7 @@ impl Global { state.is_ready().map_pass_err(scope)?; state - .flush_states( - raw, - &mut intermediate_trackers, - &*bind_group_guard, - &*buffer_guard, - &*texture_guard, - None, - ) + .flush_states(raw, &mut intermediate_trackers, &*bind_group_guard, None) .map_pass_err(scope)?; let groups_size_limit = cmd_buf.limits.max_compute_workgroups_per_dimension; @@ -703,7 +688,7 @@ impl Global { .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = state + let indirect_buffer = state .scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -731,7 +716,7 @@ impl Global { buffer_memory_init_actions.extend( indirect_buffer.initialization_status.read().create_action( - buffer_id, + indirect_buffer, offset..(offset + stride), MemoryInitKind::NeedsInitializedMemory, ), @@ -742,8 +727,6 @@ impl Global { raw, &mut intermediate_trackers, &*bind_group_guard, - &*buffer_guard, - &*texture_guard, Some(buffer_id), ) .map_pass_err(scope)?; @@ -849,17 +832,10 @@ 
impl Global { fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, - &texture_guard, &mut tracker.textures, device, ); - CommandBuffer::insert_barriers_from_tracker( - transit, - tracker, - &intermediate_trackers, - &*buffer_guard, - &*texture_guard, - ); + CommandBuffer::insert_barriers_from_tracker(transit, tracker, &intermediate_trackers); // Close the command buffer, and swap it with the previous. encoder.close_and_swap(); diff --git a/wgpu-core/src/command/memory_init.rs b/wgpu-core/src/command/memory_init.rs index dd70628479..f10c85e2be 100644 --- a/wgpu-core/src/command/memory_init.rs +++ b/wgpu-core/src/command/memory_init.rs @@ -1,14 +1,12 @@ -use std::{collections::hash_map::Entry, ops::Range, vec::Drain}; +use std::{collections::hash_map::Entry, ops::Range, sync::Arc, vec::Drain}; use hal::CommandEncoder; use crate::{ device::Device, hal_api::HalApi, - id::{self, TextureId}, init_tracker::*, - resource::{Buffer, Texture}, - storage::Storage, + resource::{Resource, Texture}, track::{TextureTracker, Tracker}, FastHashMap, }; @@ -18,31 +16,39 @@ use super::{clear::clear_texture, BakedCommands, DestroyedBufferError, Destroyed /// Surface that was discarded by `StoreOp::Discard` of a preceding renderpass. /// Any read access to this surface needs to be preceded by a texture initialization. #[derive(Clone)] -pub(crate) struct TextureSurfaceDiscard { - pub texture: TextureId, +pub(crate) struct TextureSurfaceDiscard { + pub texture: Arc>, pub mip_level: u32, pub layer: u32, } -pub(crate) type SurfacesInDiscardState = Vec; +pub(crate) type SurfacesInDiscardState = Vec>; -#[derive(Default)] -pub(crate) struct CommandBufferTextureMemoryActions { +pub(crate) struct CommandBufferTextureMemoryActions { /// The tracker actions that we need to be executed before the command /// buffer is executed. - init_actions: Vec, + init_actions: Vec>, /// All the discards that haven't been followed by init again within the /// command buffer i.e. everything in this list resets the texture init /// state *after* the command buffer execution - discards: Vec, + discards: Vec>, } -impl CommandBufferTextureMemoryActions { - pub(crate) fn drain_init_actions(&mut self) -> Drain { +impl Default for CommandBufferTextureMemoryActions { + fn default() -> Self { + Self { + init_actions: Default::default(), + discards: Default::default(), + } + } +} + +impl CommandBufferTextureMemoryActions { + pub(crate) fn drain_init_actions(&mut self) -> Drain> { self.init_actions.drain(..) } - pub(crate) fn discard(&mut self, discard: TextureSurfaceDiscard) { + pub(crate) fn discard(&mut self, discard: TextureSurfaceDiscard) { self.discards.push(discard); } @@ -50,11 +56,10 @@ impl CommandBufferTextureMemoryActions { // Returns previously discarded surface that need to be initialized *immediately* now. // Only returns a non-empty list if action is MemoryInitKind::NeedsInitializedMemory. #[must_use] - pub(crate) fn register_init_action( + pub(crate) fn register_init_action( &mut self, - action: &TextureInitTrackerAction, - texture_guard: &Storage, TextureId>, - ) -> SurfacesInDiscardState { + action: &TextureInitTrackerAction, + ) -> SurfacesInDiscardState { let mut immediately_necessary_clears = SurfacesInDiscardState::new(); // Note that within a command buffer we may stack arbitrary memory init @@ -64,18 +69,20 @@ impl CommandBufferTextureMemoryActions { // We don't need to add MemoryInitKind::NeedsInitializedMemory to // init_actions if a surface is part of the discard list. 
But that would // mean splitting up the action which is more than we'd win here. - self.init_actions - .extend(match texture_guard.get(action.id) { - Ok(texture) => texture.initialization_status.read().check_action(action), - Err(_) => return immediately_necessary_clears, // texture no longer exists - }); + self.init_actions.extend( + action + .texture + .initialization_status + .read() + .check_action(action), + ); // We expect very few discarded surfaces at any point in time which is // why a simple linear search is likely best. (i.e. most of the time // self.discards is empty!) let init_actions = &mut self.init_actions; self.discards.retain(|discarded_surface| { - if discarded_surface.texture == action.id + if discarded_surface.texture.as_info().id() == action.texture.as_info().id() && action.range.layer_range.contains(&discarded_surface.layer) && action .range @@ -89,7 +96,7 @@ impl CommandBufferTextureMemoryActions { // because it might have been uninitialized prior to // discarding init_actions.push(TextureInitTrackerAction { - id: discarded_surface.texture, + texture: discarded_surface.texture.clone(), range: TextureInitRange { mip_range: discarded_surface.mip_level ..(discarded_surface.mip_level + 1), @@ -109,20 +116,16 @@ impl CommandBufferTextureMemoryActions { // Shortcut for register_init_action when it is known that the action is an // implicit init, not requiring any immediate resource init. - pub(crate) fn register_implicit_init( + pub(crate) fn register_implicit_init( &mut self, - id: TextureId, + texture: &Arc>, range: TextureInitRange, - texture_guard: &Storage, TextureId>, ) { - let must_be_empty = self.register_init_action( - &TextureInitTrackerAction { - id, - range, - kind: MemoryInitKind::ImplicitlyInitialized, - }, - texture_guard, - ); + let must_be_empty = self.register_init_action(&TextureInitTrackerAction { + texture: texture.clone(), + range, + kind: MemoryInitKind::ImplicitlyInitialized, + }); assert!(must_be_empty.is_empty()); } } @@ -133,18 +136,16 @@ impl CommandBufferTextureMemoryActions { // Takes care of barriers as well! pub(crate) fn fixup_discarded_surfaces< A: HalApi, - InitIter: Iterator, + InitIter: Iterator>, >( inits: InitIter, encoder: &mut A::CommandEncoder, - texture_guard: &Storage, TextureId>, texture_tracker: &mut TextureTracker, device: &Device, ) { for init in inits { clear_texture( - texture_guard, - init.texture, + &init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), @@ -164,17 +165,13 @@ impl BakedCommands { pub(crate) fn initialize_buffer_memory( &mut self, device_tracker: &mut Tracker, - buffer_guard: &Storage, id::BufferId>, ) -> Result<(), DestroyedBufferError> { // Gather init ranges for each buffer so we can collapse them. // It is not possible to do this at an earlier point since previously // executed command buffer change the resource init state. let mut uninitialized_ranges_per_buffer = FastHashMap::default(); for buffer_use in self.buffer_memory_init_actions.drain(..) 
{ - let buffer = buffer_guard - .get(buffer_use.id) - .map_err(|_| DestroyedBufferError(buffer_use.id))?; - let mut initialization_status = buffer.initialization_status.write(); + let mut initialization_status = buffer_use.buffer.initialization_status.write(); // align the end to 4 let end_remainder = buffer_use.range.end % wgt::COPY_BUFFER_ALIGNMENT; @@ -188,21 +185,22 @@ impl BakedCommands { match buffer_use.kind { MemoryInitKind::ImplicitlyInitialized => {} MemoryInitKind::NeedsInitializedMemory => { - match uninitialized_ranges_per_buffer.entry(buffer_use.id) { + match uninitialized_ranges_per_buffer.entry(buffer_use.buffer.as_info().id()) { Entry::Vacant(e) => { - e.insert( + e.insert(( + buffer_use.buffer.clone(), uninitialized_ranges.collect::>>(), - ); + )); } Entry::Occupied(mut e) => { - e.get_mut().extend(uninitialized_ranges); + e.get_mut().1.extend(uninitialized_ranges); } } } } } - for (buffer_id, mut ranges) in uninitialized_ranges_per_buffer { + for (buffer_id, (buffer, mut ranges)) in uninitialized_ranges_per_buffer { // Collapse touching ranges. ranges.sort_by_key(|r| r.start); for i in (1..ranges.len()).rev() { @@ -221,19 +219,16 @@ impl BakedCommands { // must already know about it. let transition = device_tracker .buffers - .set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST) + .set_single(&buffer, hal::BufferUses::COPY_DST) .unwrap() .1; - let buffer = buffer_guard - .get(buffer_id) - .map_err(|_| DestroyedBufferError(buffer_id))?; let raw_buf = buffer.raw.as_ref().ok_or(DestroyedBufferError(buffer_id))?; unsafe { self.encoder.transition_buffers( transition - .map(|pending| pending.into_hal(buffer)) + .map(|pending| pending.into_hal(&buffer)) .into_iter(), ); } @@ -269,15 +264,11 @@ impl BakedCommands { pub(crate) fn initialize_texture_memory( &mut self, device_tracker: &mut Tracker, - texture_guard: &Storage, TextureId>, device: &Device, ) -> Result<(), DestroyedTextureError> { let mut ranges: Vec = Vec::new(); for texture_use in self.texture_memory_actions.drain_init_actions() { - let texture = texture_guard - .get(texture_use.id) - .map_err(|_| DestroyedTextureError(texture_use.id))?; - let mut initialization_status = texture.initialization_status.write(); + let mut initialization_status = texture_use.texture.initialization_status.write(); let use_range = texture_use.range; let affected_mip_trackers = initialization_status .mips @@ -307,8 +298,7 @@ impl BakedCommands { // TODO: Could we attempt some range collapsing here? for range in ranges.drain(..) { clear_texture( - texture_guard, - texture_use.id, + &texture_use.texture, range, &mut self.encoder, &mut device_tracker.textures, @@ -323,10 +313,8 @@ impl BakedCommands { // cmdbuf start, we discard init states for textures it left discarded // after its execution. 
for surface_discard in self.texture_memory_actions.discards.iter() { - let texture = texture_guard - .get(surface_discard.texture) - .map_err(|_| DestroyedTextureError(surface_discard.texture))?; - texture + surface_discard + .texture .initialization_status .write() .discard(surface_discard.mip_level, surface_discard.layer); diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 12c11093f8..9dc8122a03 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -26,15 +26,7 @@ use crate::id::CommandBufferId; use crate::init_tracker::BufferInitTrackerAction; use crate::resource::{Resource, ResourceInfo}; use crate::track::{Tracker, UsageScope}; -use crate::{ - global::Global, - hal_api::HalApi, - id, - identity::GlobalIdentityHandlerFactory, - resource::{Buffer, Texture}, - storage::Storage, - Label, -}; +use crate::{global::Global, hal_api::HalApi, id, identity::GlobalIdentityHandlerFactory, Label}; use hal::CommandEncoder as _; use parking_lot::Mutex; @@ -104,8 +96,8 @@ pub struct BakedCommands { pub(crate) encoder: A::CommandEncoder, pub(crate) list: Vec, pub(crate) trackers: Tracker, - buffer_memory_init_actions: Vec, - texture_memory_actions: CommandBufferTextureMemoryActions, + buffer_memory_init_actions: Vec>, + texture_memory_actions: CommandBufferTextureMemoryActions, } pub(crate) struct DestroyedBufferError(pub id::BufferId); @@ -115,8 +107,8 @@ pub struct CommandBufferMutable { encoder: CommandEncoder, status: CommandEncoderStatus, pub(crate) trackers: Tracker, - buffer_memory_init_actions: Vec, - texture_memory_actions: CommandBufferTextureMemoryActions, + buffer_memory_init_actions: Vec>, + texture_memory_actions: CommandBufferTextureMemoryActions, pub(crate) pending_query_resets: QueryResetMap, #[cfg(feature = "trace")] pub(crate) commands: Option>, @@ -199,50 +191,33 @@ impl CommandBuffer { raw: &mut A::CommandEncoder, base: &mut Tracker, head: &Tracker, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, ) { profiling::scope!("insert_barriers"); base.buffers.set_from_tracker(&head.buffers); - base.textures - .set_from_tracker(texture_guard, &head.textures); + base.textures.set_from_tracker(&head.textures); - Self::drain_barriers(raw, base, buffer_guard, texture_guard); + Self::drain_barriers(raw, base); } pub(crate) fn insert_barriers_from_scope( raw: &mut A::CommandEncoder, base: &mut Tracker, head: &UsageScope, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, ) { profiling::scope!("insert_barriers"); base.buffers.set_from_usage_scope(&head.buffers); - base.textures - .set_from_usage_scope(texture_guard, &head.textures); + base.textures.set_from_usage_scope(&head.textures); - Self::drain_barriers(raw, base, buffer_guard, texture_guard); + Self::drain_barriers(raw, base); } - pub(crate) fn drain_barriers( - raw: &mut A::CommandEncoder, - base: &mut Tracker, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, - ) { + pub(crate) fn drain_barriers(raw: &mut A::CommandEncoder, base: &mut Tracker) { profiling::scope!("drain_barriers"); - let buffer_barriers = base.buffers.drain().map(|pending| { - let buf = unsafe { buffer_guard.get_unchecked(pending.id) }; - pending.into_hal(buf) - }); - let texture_barriers = base.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + let buffer_barriers = base.buffers.drain_transitions(); + let texture_barriers = 
base.textures.drain_transitions(); unsafe { raw.transition_buffers(buffer_barriers); @@ -252,13 +227,10 @@ impl CommandBuffer { } impl CommandBuffer { - fn get_encoder( - hub: &Hub, + fn get_encoder( + hub: &Hub, id: id::CommandEncoderId, - ) -> Result, CommandEncoderError> - where - G: GlobalIdentityHandlerFactory, - { + ) -> Result, CommandEncoderError> { let storage = hub.command_buffers.read(); match storage.get(id) { Ok(cmd_buf) => match cmd_buf.data.lock().as_ref().unwrap().status { diff --git a/wgpu-core/src/command/query.rs b/wgpu-core/src/command/query.rs index d3254ab5bc..3d4404b41e 100644 --- a/wgpu-core/src/command/query.rs +++ b/wgpu-core/src/command/query.rs @@ -422,9 +422,12 @@ impl Global { let (dst_buffer, dst_pending) = { let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination) + .map_err(|_| QueryError::InvalidBuffer(destination))?; tracker .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) + .set_single(dst_buffer, hal::BufferUses::COPY_DST) .ok_or(QueryError::InvalidBuffer(destination))? }; let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); @@ -468,7 +471,7 @@ impl Global { // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state. buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - destination, + &dst_buffer, buffer_start_offset..buffer_end_offset, MemoryInitKind::ImplicitlyInitialized, )); diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 9c30983f11..08dd5806be 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -43,6 +43,7 @@ use serde::Deserialize; #[cfg(any(feature = "serial-pass", feature = "trace"))] use serde::Serialize; +use std::sync::Arc; use std::{borrow::Cow, fmt, iter, marker::PhantomData, mem, num::NonZeroU32, ops::Range, str}; use super::{ @@ -683,16 +684,16 @@ where } } -struct RenderAttachment<'a> { - texture_id: &'a id::TextureId, +struct RenderAttachment<'a, A: HalApi> { + texture: Arc>, selector: &'a TextureSelector, usage: hal::TextureUses, } impl TextureView { - fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { + fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { RenderAttachment { - texture_id: &self.parent_id, + texture: self.parent.as_ref().unwrap().clone(), selector: &self.selector, usage, } @@ -706,13 +707,13 @@ struct RenderPassInfo<'a, A: HalApi> { context: RenderPassContext, usage_scope: UsageScope, /// All render attachments, including depth/stencil - render_attachments: AttachmentDataVec>, + render_attachments: AttachmentDataVec>, is_depth_read_only: bool, is_stencil_read_only: bool, extent: wgt::Extent3d, _phantom: PhantomData, - pending_discard_init_fixups: SurfacesInDiscardState, + pending_discard_init_fixups: SurfacesInDiscardState, divergent_discarded_depth_stencil_aspect: Option<(wgt::TextureAspect, &'a TextureView)>, multiview: Option, } @@ -720,27 +721,24 @@ struct RenderPassInfo<'a, A: HalApi> { impl<'a, A: HalApi> RenderPassInfo<'a, A> { fn add_pass_texture_init_actions( channel: &PassChannel, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, view: &TextureView, - texture_guard: &Storage, id::TextureId>, - pending_discard_init_fixups: &mut SurfacesInDiscardState, + pending_discard_init_fixups: &mut SurfacesInDiscardState, ) { if channel.load_op == LoadOp::Load { 
pending_discard_init_fixups.extend(texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id, + texture: view.parent.as_ref().unwrap().clone(), range: TextureInitRange::from(view.selector.clone()), // Note that this is needed even if the target is discarded, kind: MemoryInitKind::NeedsInitializedMemory, }, - texture_guard, )); } else if channel.store_op == StoreOp::Store { // Clear + Store texture_memory_actions.register_implicit_init( - view.parent_id, + view.parent.as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), - texture_guard, ); } if channel.store_op == StoreOp::Discard { @@ -748,7 +746,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // discard right away be alright since the texture can't be used // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { - texture: view.parent_id, + texture: view.parent.as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -764,7 +762,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { occlusion_query_set: Option, encoder: &mut CommandEncoder, trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, pending_query_resets: &mut QueryResetMap, view_guard: &'a Storage, id::TextureViewId>, buffer_guard: &'a Storage, id::BufferId>, @@ -779,7 +777,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut is_depth_read_only = false; let mut is_stencil_read_only = false; - let mut render_attachments = AttachmentDataVec::::new(); + let mut render_attachments = AttachmentDataVec::>::new(); let mut discarded_surfaces = AttachmentDataVec::new(); let mut pending_discard_init_fixups = SurfacesInDiscardState::new(); let mut divergent_discarded_depth_stencil_aspect = None; @@ -881,7 +879,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { &at.depth, texture_memory_actions, view, - texture_guard, &mut pending_discard_init_fixups, ); } else if !ds_aspects.contains(hal::FormatAspects::DEPTH) { @@ -889,7 +886,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { &at.stencil, texture_memory_actions, view, - texture_guard, &mut pending_discard_init_fixups, ); } else { @@ -918,14 +914,11 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { at.depth.load_op == LoadOp::Load || at.stencil.load_op == LoadOp::Load; if need_init_beforehand { pending_discard_init_fixups.extend( - texture_memory_actions.register_init_action( - &TextureInitTrackerAction { - id: view.parent_id, - range: TextureInitRange::from(view.selector.clone()), - kind: MemoryInitKind::NeedsInitializedMemory, - }, - texture_guard, - ), + texture_memory_actions.register_init_action(&TextureInitTrackerAction { + texture: view.parent.as_ref().unwrap().clone(), + range: TextureInitRange::from(view.selector.clone()), + kind: MemoryInitKind::NeedsInitializedMemory, + }), ); } @@ -940,9 +933,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if at.depth.store_op != at.stencil.store_op { if !need_init_beforehand { texture_memory_actions.register_implicit_init( - view.parent_id, + view.parent.as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), - texture_guard, ); } divergent_discarded_depth_stencil_aspect = Some(( @@ -956,7 +948,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if at.depth.store_op == StoreOp::Discard { // Both are discarded using the regular path. 
discarded_surfaces.push(TextureSurfaceDiscard { - texture: view.parent_id, + texture: view.parent.as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -1023,7 +1015,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { &at.channel, texture_memory_actions, color_view, - texture_guard, &mut pending_discard_init_fixups, ); render_attachments @@ -1083,9 +1074,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } texture_memory_actions.register_implicit_init( - resolve_view.parent_id, + resolve_view.parent.as_ref().unwrap(), TextureInitRange::from(resolve_view.selector.clone()), - texture_guard, ); render_attachments .push(resolve_view.to_render_attachment(hal::TextureUses::COLOR_TARGET)); @@ -1198,30 +1188,21 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { fn finish( mut self, raw: &mut A::CommandEncoder, - texture_guard: &Storage, id::TextureId>, - ) -> Result<(UsageScope, SurfacesInDiscardState), RenderPassErrorInner> { + ) -> Result<(UsageScope, SurfacesInDiscardState), RenderPassErrorInner> { profiling::scope!("RenderPassInfo::finish"); unsafe { raw.end_render_pass(); } for ra in self.render_attachments { - if !texture_guard.contains(*ra.texture_id) { - return Err(RenderPassErrorInner::SurfaceTextureDropped); - } - let texture = &texture_guard[*ra.texture_id]; + let texture = &ra.texture; check_texture_usage(texture.desc.usage, TextureUsages::RENDER_ATTACHMENT)?; // the tracker set of the pass is always in "extend" mode unsafe { self.usage_scope .textures - .merge_single( - texture_guard, - *ra.texture_id, - Some(ra.selector.clone()), - ra.usage, - ) + .merge_single(texture, Some(ra.selector.clone()), ra.usage) .map_err(UsageConflict::from)? }; } @@ -1436,7 +1417,7 @@ impl Global { // merge the resource tracker in unsafe { info.usage_scope - .merge_bind_group(&*texture_guard, &bind_group.used) + .merge_bind_group(&bind_group.used) .map_pass_err(scope)?; } //Note: stateless trackers are not merged: the lifetime reference @@ -1444,18 +1425,16 @@ impl Global { buffer_memory_init_actions.extend( bind_group.used_buffer_ranges.iter().filter_map(|action| { - match buffer_guard.get(action.id) { - Ok(buffer) => { - buffer.initialization_status.read().check_action(action) - } - Err(_) => None, - } + action + .buffer + .initialization_status + .read() + .check_action(action) }), ); for action in bind_group.used_texture_ranges.iter() { - info.pending_discard_init_fixups.extend( - texture_memory_actions.register_init_action(action, &texture_guard), - ); + info.pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } let pipeline_layout = state.binder.pipeline_layout.clone(); @@ -1603,7 +1582,7 @@ impl Global { size, } => { let scope = PassErrorScope::SetIndexBuffer(buffer_id); - let buffer: &Buffer = info + let buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) @@ -1627,7 +1606,7 @@ impl Global { buffer_memory_init_actions.extend( buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, ), @@ -1649,7 +1628,7 @@ impl Global { size, } => { let scope = PassErrorScope::SetVertexBuffer(buffer_id); - let buffer: &Buffer = info + let buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) @@ -1678,7 +1657,7 @@ impl Global { buffer_memory_init_actions.extend( buffer.initialization_status.read().create_action( - buffer_id, + buffer, offset..(offset + 
vertex_state.total_size), MemoryInitKind::NeedsInitializedMemory, ), @@ -1921,7 +1900,7 @@ impl Global { .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = info + let indirect_buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -1949,7 +1928,7 @@ impl Global { buffer_memory_init_actions.extend( indirect_buffer.initialization_status.read().create_action( - buffer_id, + indirect_buffer, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -1991,7 +1970,7 @@ impl Global { .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = info + let indirect_buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -2004,7 +1983,7 @@ impl Global { .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) .map_pass_err(scope)?; - let count_buffer: &Buffer = info + let count_buffer = info .usage_scope .buffers .merge_single( @@ -2033,7 +2012,7 @@ impl Global { } buffer_memory_init_actions.extend( indirect_buffer.initialization_status.read().create_action( - buffer_id, + indirect_buffer, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -2051,7 +2030,7 @@ impl Global { } buffer_memory_init_actions.extend( count_buffer.initialization_status.read().create_action( - count_buffer_id, + count_buffer, count_buffer_offset..end_count_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -2224,40 +2203,33 @@ impl Global { bundle .buffer_memory_init_actions .iter() - .filter_map(|action| match buffer_guard.get(action.id) { - Ok(buffer) => { - buffer.initialization_status.read().check_action(action) - } - Err(_) => None, + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) }), ); for action in bundle.texture_memory_init_actions.iter() { - info.pending_discard_init_fixups.extend( - texture_memory_actions.register_init_action(action, &texture_guard), - ); + info.pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } - unsafe { - bundle.execute( - raw, - &*bind_group_guard, - &*render_pipeline_guard, - &*buffer_guard, - ) - } - .map_err(|e| match e { - ExecutionError::DestroyedBuffer(id) => { - RenderCommandError::DestroyedBuffer(id) - } - ExecutionError::Unimplemented(what) => { - RenderCommandError::Unimplemented(what) - } - }) - .map_pass_err(scope)?; + unsafe { bundle.execute(raw) } + .map_err(|e| match e { + ExecutionError::DestroyedBuffer(id) => { + RenderCommandError::DestroyedBuffer(id) + } + ExecutionError::Unimplemented(what) => { + RenderCommandError::Unimplemented(what) + } + }) + .map_pass_err(scope)?; unsafe { info.usage_scope - .merge_render_bundle(&*texture_guard, &bundle.used) + .merge_render_bundle(&bundle.used) .map_pass_err(scope)?; tracker .add_from_render_bundle(&bundle.used) @@ -2270,15 +2242,13 @@ impl Global { log::trace!("Merging renderpass into cmd_buf {:?}", encoder_id); let (trackers, pending_discard_init_fixups) = - info.finish(raw, &*texture_guard).map_pass_err(init_scope)?; + info.finish(raw).map_pass_err(init_scope)?; encoder.close(); (trackers, pending_discard_init_fixups) }; let query_set_guard = hub.query_sets.read(); - let buffer_guard = hub.buffers.read(); - let texture_guard = hub.textures.read(); let cmd_buf = hub.command_buffers.get(encoder_id).unwrap(); let mut cmd_buf_data = cmd_buf.data.lock(); @@ -2294,7 +2264,6 @@ impl Global 
{ fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, - &texture_guard, &mut tracker.textures, &cmd_buf.device, ); @@ -2309,13 +2278,7 @@ impl Global { .map_err(RenderCommandError::InvalidQuerySet) .map_pass_err(PassErrorScope::QueryReset)?; - super::CommandBuffer::insert_barriers_from_scope( - transit, - tracker, - &scope, - &*buffer_guard, - &*texture_guard, - ); + super::CommandBuffer::insert_barriers_from_scope(transit, tracker, &scope); } *status = CommandEncoderStatus::Recording; diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 2a4a790783..4984e3bebf 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -440,14 +440,15 @@ fn handle_texture_init( init_kind: MemoryInitKind, encoder: &mut CommandEncoder, trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, texture_guard: &Storage, TextureId>, ) { + let texture = texture_guard.get(copy_texture.texture).unwrap(); let init_action = TextureInitTrackerAction { - id: copy_texture.texture, + texture: texture.clone(), range: TextureInitRange { mip_range: copy_texture.mip_level..copy_texture.mip_level + 1, layer_range: copy_texture.origin.z @@ -457,16 +458,14 @@ fn handle_texture_init( }; // Register the init action. - let immediate_inits = - texture_memory_actions.register_init_action(&{ init_action }, texture_guard); + let immediate_inits = texture_memory_actions.register_init_action(&{ init_action }); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { let cmd_buf_raw = encoder.open(); for init in immediate_inits { clear_texture( - texture_guard, - init.texture, + &init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), @@ -488,7 +487,7 @@ fn handle_texture_init( fn handle_src_texture_init( encoder: &mut CommandEncoder, trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -518,7 +517,7 @@ fn handle_src_texture_init( fn handle_dst_texture_init( encoder: &mut CommandEncoder, trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -591,10 +590,13 @@ impl Global { let (src_buffer, src_pending) = { let buffer_guard = hub.buffers.read(); + let src_buffer = buffer_guard + .get(source) + .map_err(|_| TransferError::InvalidBuffer(source))?; cmd_buf_data .trackers .buffers - .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC) + .set_single(src_buffer, hal::BufferUses::COPY_SRC) .ok_or(TransferError::InvalidBuffer(source))? }; let src_raw = src_buffer @@ -609,10 +611,13 @@ impl Global { let (dst_buffer, dst_pending) = { let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination) + .map_err(|_| TransferError::InvalidBuffer(destination))?; cmd_buf_data .trackers .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) + .set_single(dst_buffer, hal::BufferUses::COPY_DST) .ok_or(TransferError::InvalidBuffer(destination))? 
}; let dst_raw = dst_buffer @@ -683,14 +688,14 @@ impl Global { // Make sure source is initialized memory and mark dest as initialized. cmd_buf_data.buffer_memory_init_actions.extend( dst_buffer.initialization_status.read().create_action( - destination, + &dst_buffer, destination_offset..(destination_offset + size), MemoryInitKind::ImplicitlyInitialized, ), ); cmd_buf_data.buffer_memory_init_actions.extend( src_buffer.initialization_status.read().create_action( - source, + &src_buffer, source_offset..(source_offset + size), MemoryInitKind::NeedsInitializedMemory, ), @@ -775,9 +780,12 @@ impl Global { let (src_buffer, src_pending) = { let buffer_guard = hub.buffers.read(); + let src_buffer = buffer_guard + .get(source.buffer) + .map_err(|_| TransferError::InvalidBuffer(source.buffer))?; tracker .buffers - .set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC) + .set_single(src_buffer, hal::BufferUses::COPY_SRC) .ok_or(TransferError::InvalidBuffer(source.buffer))? }; let src_raw = src_buffer @@ -791,12 +799,7 @@ impl Global { let dst_pending = tracker .textures - .set_single( - dst_texture, - destination.texture, - dst_range, - hal::TextureUses::COPY_DST, - ) + .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; let dst_raw = dst_texture .inner @@ -840,7 +843,7 @@ impl Global { } buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( - source.buffer, + &src_buffer, source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), MemoryInitKind::NeedsInitializedMemory, )); @@ -927,12 +930,7 @@ impl Global { let src_pending = tracker .textures - .set_single( - src_texture, - source.texture, - src_range, - hal::TextureUses::COPY_SRC, - ) + .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; let src_raw = src_texture .inner @@ -960,13 +958,12 @@ impl Global { let (dst_buffer, dst_pending) = { let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination.buffer) + .map_err(|_| TransferError::InvalidBuffer(destination.buffer))?; tracker .buffers - .set_single( - &*buffer_guard, - destination.buffer, - hal::BufferUses::COPY_DST, - ) + .set_single(dst_buffer, hal::BufferUses::COPY_DST) .ok_or(TransferError::InvalidBuffer(destination.buffer))? 
}; let dst_raw = dst_buffer @@ -1009,7 +1006,7 @@ impl Global { } buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - destination.buffer, + &dst_buffer, destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), MemoryInitKind::ImplicitlyInitialized, )); @@ -1140,12 +1137,7 @@ impl Global { let src_pending = cmd_buf_data .trackers .textures - .set_single( - src_texture, - source.texture, - src_range, - hal::TextureUses::COPY_SRC, - ) + .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; let src_raw = src_texture .inner @@ -1166,12 +1158,7 @@ impl Global { let dst_pending = cmd_buf_data .trackers .textures - .set_single( - dst_texture, - destination.texture, - dst_range, - hal::TextureUses::COPY_DST, - ) + .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; let dst_raw = dst_texture .inner diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 7ef51165d3..1453ea9849 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -10,27 +10,21 @@ use crate::{ init_tracker::TextureInitTracker, instance::{self, Adapter, Surface}, pipeline, present, - resource::{self, Buffer, BufferAccessResult, BufferMapState}, + resource::{self, BufferAccessResult}, resource::{BufferAccessError, BufferMapOperation, Resource}, validation::check_buffer_usage, FastHashMap, Label, LabelHelpers as _, }; -use hal::{CommandEncoder as _, Device as _}; +use hal::Device as _; use parking_lot::RwLock; use smallvec::SmallVec; use wgt::{BufferAddress, TextureFormat}; -use std::{ - borrow::Cow, - iter, mem, - ops::Range, - ptr, - sync::{atomic::Ordering, Arc}, -}; +use std::{borrow::Cow, iter, ops::Range, ptr, sync::Arc}; -use super::{BufferMapPendingClosure, ImplicitPipelineIds, InvalidDevice, UserClosures}; +use super::{ImplicitPipelineIds, InvalidDevice, UserClosures}; impl Global { pub fn adapter_is_surface_supported( @@ -136,7 +130,7 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = A::hub(self); - let fid = hub.buffers.prepare(id_in); + let fid = hub.buffers.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -152,7 +146,7 @@ impl Global { #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { let mut desc = desc.clone(); - let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); + let mapped_at_creation = std::mem::replace(&mut desc.mapped_at_creation, false); if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { desc.usage |= wgt::BufferUsages::COPY_DST; } @@ -286,7 +280,7 @@ impl Global { /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages pub fn create_buffer_error(&self, id_in: Input, label: Label) { let hub = A::hub(self); - let fid = hub.buffers.prepare(id_in); + let fid = hub.buffers.prepare::(id_in); fid.assign_error(label.borrow_or_default()); } @@ -297,7 +291,7 @@ impl Global { label: Label, ) { let hub = A::hub(self); - let fid = hub.render_bundles.prepare(id_in); + let fid = hub.render_bundles.prepare::(id_in); fid.assign_error(label.borrow_or_default()); } @@ -307,7 +301,7 @@ impl Global { /// See `create_buffer_error` for more context and explaination. 
pub fn create_texture_error(&self, id_in: Input, label: Label) { let hub = A::hub(self); - let fid = hub.textures.prepare(id_in); + let fid = hub.textures.prepare::(id_in); fid.assign_error(label.borrow_or_default()); } @@ -444,59 +438,14 @@ impl Global { ) -> Result<(), resource::DestroyError> { profiling::scope!("Buffer::destroy"); - let map_closure; - // Restrict the locks to this scope. - { - let hub = A::hub(self); - - //TODO: lock pending writes separately, keep the device read-only - - log::debug!("Buffer {:?} is asked to be dropped", buffer_id); - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; - - let device = &buffer.device; - - map_closure = match &*buffer.map_state.lock() { - &BufferMapState::Waiting(..) // To get the proper callback behavior. - | &BufferMapState::Init { .. } - | &BufferMapState::Active { .. } - => { - self.buffer_unmap_inner(buffer_id, &buffer, device) - .unwrap_or(None) - } - _ => None, - }; - - #[cfg(feature = "trace")] - if let Some(ref mut trace) = *device.trace.lock() { - trace.add(trace::Action::FreeBuffer(buffer_id)); - } - if buffer.raw.is_none() { - return Err(resource::DestroyError::AlreadyDestroyed); - } - - let temp = queue::TempResource::Buffer(buffer.clone()); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - if pending_writes.dst_buffers.contains_key(&buffer_id) { - pending_writes.temp_resources.push(temp); - } else { - let last_submit_index = buffer.info.submission_index(); - device - .lock_life() - .schedule_resource_destruction(temp, last_submit_index); - } - } - - // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = map_closure { - operation.callback.call(status); - } + let hub = A::hub(self); - Ok(()) + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); + let buffer = hub + .buffers + .get(buffer_id) + .map_err(|_| resource::DestroyError::Invalid)?; + buffer.destroy() } pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { @@ -505,22 +454,15 @@ impl Global { let hub = A::hub(self); - let (last_submit_index, buffer) = { - let mut buffer_guard = hub.buffers.write(); - match buffer_guard.get(buffer_id) { - Ok(buffer) => { - let last_submit_index = buffer.info.submission_index(); - (last_submit_index, buffer.clone()) - } - Err(_) => { - hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); - return; - } + if let Some(buffer) = hub.buffers.unregister(buffer_id) { + if buffer.is_unique() { + buffer.destroy().ok(); } - }; - let device = &buffer.device; - { + let last_submit_index = buffer.info.submission_index(); + + let device = buffer.device.clone(); + if device .pending_writes .lock() @@ -529,23 +471,20 @@ impl Global { .dst_buffers .contains_key(&buffer_id) { - device - .lock_life() - .future_suspected_buffers - .push(buffer.clone()); + device.lock_life().future_suspected_buffers.push(buffer); } else { device .lock_life() .suspected_resources .buffers - .insert(buffer_id, buffer.clone()); + .insert(buffer_id, buffer); } - } - if wait { - match device.wait_for_submit(last_submit_index) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + } } } } @@ -560,7 +499,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.textures.prepare(id_in); + 
let fid = hub.textures.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -608,7 +547,7 @@ impl Global { array_layer_count: Some(1), }, }; - let view = device.create_texture_view(&resource, id, &desc).unwrap(); + let view = device.create_texture_view(&resource, &desc).unwrap(); clear_views.push(Arc::new(view)); } } @@ -648,7 +587,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.textures.prepare(id_in); + let fid = hub.textures.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -717,7 +656,7 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = A::hub(self); - let fid = hub.buffers.prepare(id_in); + let fid = hub.buffers.prepare::(id_in); let error = loop { let device_guard = hub.devices.read(); @@ -827,45 +766,34 @@ impl Global { let hub = A::hub(self); - let (last_submit_index, texture) = { - let mut texture_guard = hub.textures.write(); - match texture_guard.get(texture_id) { - Ok(texture) => { - let last_submit_index = texture.info.submission_index(); - (last_submit_index, texture.clone()) - } - Err(_) => { - hub.textures - .unregister_locked(texture_id, &mut *texture_guard); - return; - } - } - }; + if let Some(texture) = hub.textures.unregister(texture_id) { + let last_submit_index = texture.info.submission_index(); - let device = &texture.device; - { - let mut life_lock = device.lock_life(); - if device - .pending_writes - .lock() - .as_ref() - .unwrap() - .dst_textures - .contains_key(&texture_id) + let device = &texture.device; { - life_lock.future_suspected_textures.push(texture.clone()); - } else { - life_lock - .suspected_resources - .textures - .insert(texture_id, texture.clone()); + let mut life_lock = device.lock_life(); + if device + .pending_writes + .lock() + .as_ref() + .unwrap() + .dst_textures + .contains_key(&texture_id) + { + life_lock.future_suspected_textures.push(texture.clone()); + } else { + life_lock + .suspected_resources + .textures + .insert(texture_id, texture.clone()); + } } - } - if wait { - match device.wait_for_submit(last_submit_index) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + } } } } @@ -880,7 +808,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.texture_views.prepare(id_in); + let fid = hub.texture_views.prepare::(id_in); let error = loop { let texture = match hub.textures.get(texture_id) { @@ -897,7 +825,7 @@ impl Global { }); } - let view = match device.create_texture_view(&texture, texture_id, desc) { + let view = match device.create_texture_view(&texture, desc) { Ok(view) => view, Err(e) => break e, }; @@ -925,36 +853,25 @@ impl Global { let hub = A::hub(self); - let (last_submit_index, view) = { - let mut texture_view_guard = hub.texture_views.write(); - match texture_view_guard.get(texture_view_id) { - Ok(view) => { - let last_submit_index = view.info.submission_index(); - (last_submit_index, view.clone()) - } - Err(_) => { - hub.texture_views - .unregister_locked(texture_view_id, &mut *texture_view_guard); - return Ok(()); + if let Some(view) = hub.texture_views.unregister(texture_view_id) { + let last_submit_index = view.info.submission_index(); + + view.device + .lock_life() + .suspected_resources + .texture_views + .insert(texture_view_id, view.clone()); + + if wait { + match 
view.device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!( + "Failed to wait for texture view {:?}: {:?}", + texture_view_id, + e + ), } } - }; - - view.device - .lock_life() - .suspected_resources - .texture_views - .insert(texture_view_id, view.clone()); - - if wait { - match view.device.wait_for_submit(last_submit_index) { - Ok(()) => (), - Err(e) => log::error!( - "Failed to wait for texture view {:?}: {:?}", - texture_view_id, - e - ), - } } Ok(()) } @@ -968,7 +885,7 @@ impl Global { profiling::scope!("Device::create_sampler"); let hub = A::hub(self); - let fid = hub.samplers.prepare(id_in); + let fid = hub.samplers.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1006,24 +923,14 @@ impl Global { let hub = A::hub(self); - let sampler = { - let mut sampler_guard = hub.samplers.write(); - match sampler_guard.get(sampler_id) { - Ok(sampler) => sampler.clone(), - Err(_) => { - hub.samplers - .unregister_locked(sampler_id, &mut *sampler_guard); - return; - } - } - }; - - sampler - .device - .lock_life() - .suspected_resources - .samplers - .insert(sampler_id, sampler.clone()); + if let Some(sampler) = hub.samplers.unregister(sampler_id) { + sampler + .device + .lock_life() + .suspected_resources + .samplers + .insert(sampler_id, sampler.clone()); + } } pub fn device_create_bind_group_layout( @@ -1038,7 +945,7 @@ impl Global { profiling::scope!("Device::create_bind_group_layout"); let hub = A::hub(self); - let fid = hub.bind_group_layouts.prepare(id_in); + let fid = hub.bind_group_layouts.prepare::(id_in); let error = 'outer: loop { let device = match hub.devices.get(device_id) { @@ -1068,20 +975,9 @@ impl Global { let mut compatible_layout = None; { let bgl_guard = hub.bind_group_layouts.read(); - if let Some((id, layout)) = + if let Some((_id, layout)) = Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) { - // If there is an equivalent BGL, just bump the refcount and return it. - // This is only applicable if ids are generated in wgpu. In practice: - // - wgpu users take this branch and return the existing - // id without using the indirection layer in BindGroupLayout. - // - Other users like gecko or the replay tool use don't take - // the branch and instead rely on the indirection to use the - // proper bind group layout id. 
- if G::ids_are_generated_in_wgpu() { - return (id, None); - } - compatible_layout = Some(layout.clone()); } } @@ -1116,24 +1012,14 @@ impl Global { let hub = A::hub(self); - let layout = { - let mut bind_group_layout_guard = hub.bind_group_layouts.write(); - match bind_group_layout_guard.get(bind_group_layout_id) { - Ok(layout) => layout.clone(), - Err(_) => { - hub.bind_group_layouts - .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); - return; - } - } - }; - - layout - .device - .lock_life() - .suspected_resources - .bind_group_layouts - .insert(bind_group_layout_id, layout.clone()); + if let Some(layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) { + layout + .device + .lock_life() + .suspected_resources + .bind_group_layouts + .insert(bind_group_layout_id, layout.clone()); + } } pub fn device_create_pipeline_layout( @@ -1148,7 +1034,7 @@ impl Global { profiling::scope!("Device::create_pipeline_layout"); let hub = A::hub(self); - let fid = hub.pipeline_layouts.prepare(id_in); + let fid = hub.pipeline_layouts.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1189,25 +1075,14 @@ impl Global { ); let hub = A::hub(self); - - let layout = { - let mut pipeline_layout_guard = hub.pipeline_layouts.write(); - match pipeline_layout_guard.get(pipeline_layout_id) { - Ok(layout) => layout.clone(), - Err(_) => { - hub.pipeline_layouts - .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); - return; - } - } - }; - - layout - .device - .lock_life() - .suspected_resources - .pipeline_layouts - .insert(pipeline_layout_id, layout.clone()); + if let Some(layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) { + layout + .device + .lock_life() + .suspected_resources + .pipeline_layouts + .insert(pipeline_layout_id, layout.clone()); + } } pub fn device_create_bind_group( @@ -1219,7 +1094,7 @@ impl Global { profiling::scope!("Device::create_bind_group"); let hub = A::hub(self); - let fid = hub.bind_groups.prepare(id_in); + let fid = hub.bind_groups.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1271,24 +1146,14 @@ impl Global { let hub = A::hub(self); - let bind_group = { - let mut bind_group_guard = hub.bind_groups.write(); - match bind_group_guard.get(bind_group_id) { - Ok(bind_group) => bind_group.clone(), - Err(_) => { - hub.bind_groups - .unregister_locked(bind_group_id, &mut *bind_group_guard); - return; - } - } - }; - - bind_group - .device - .lock_life() - .suspected_resources - .bind_groups - .insert(bind_group_id, bind_group.clone()); + if let Some(bind_group) = hub.bind_groups.unregister(bind_group_id) { + bind_group + .device + .lock_life() + .suspected_resources + .bind_groups + .insert(bind_group_id, bind_group.clone()); + } } pub fn device_create_shader_module( @@ -1304,7 +1169,7 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = A::hub(self); - let fid = hub.shader_modules.prepare(id_in); + let fid = hub.shader_modules.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1367,7 +1232,7 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = A::hub(self); - let fid = hub.shader_modules.prepare(id_in); + let fid = hub.shader_modules.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1420,7 +1285,7 @@ impl Global { profiling::scope!("Device::create_command_encoder"); let hub = A::hub(self); - let fid = 
hub.command_buffers.prepare(id_in); + let fid = hub.command_buffers.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1471,10 +1336,11 @@ impl Global { let hub = A::hub(self); - let cmd_buf = hub.command_buffers.unregister(command_encoder_id).unwrap(); - cmd_buf - .device - .untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers); + if let Some(cmd_buf) = hub.command_buffers.unregister(command_encoder_id) { + cmd_buf + .device + .untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers); + } } pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { @@ -1512,7 +1378,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.render_bundles.prepare(id_in); + let fid = hub.render_bundles.prepare::(id_in); let error = loop { let device = match hub.devices.get(bundle_encoder.parent()) { @@ -1557,24 +1423,14 @@ impl Global { log::debug!("RenderBundle {:?} is asked to be dropped", render_bundle_id); let hub = A::hub(self); - let bundle = { - let mut bundle_guard = hub.render_bundles.write(); - match bundle_guard.get(render_bundle_id) { - Ok(bundle) => bundle.clone(), - Err(_) => { - hub.render_bundles - .unregister_locked(render_bundle_id, &mut *bundle_guard); - return; - } - } - }; - - bundle - .device - .lock_life() - .suspected_resources - .render_bundles - .insert(render_bundle_id, bundle.clone()); + if let Some(bundle) = hub.render_bundles.unregister(render_bundle_id) { + bundle + .device + .lock_life() + .suspected_resources + .render_bundles + .insert(render_bundle_id, bundle.clone()); + } } pub fn device_create_query_set( @@ -1586,7 +1442,7 @@ impl Global { profiling::scope!("Device::create_query_set"); let hub = A::hub(self); - let fid = hub.query_sets.prepare(id_in); + let fid = hub.query_sets.prepare::(id_in); let error = loop { let device = match hub.devices.get(device_id) { @@ -1626,25 +1482,21 @@ impl Global { log::debug!("QuerySet {:?} is asked to be dropped", query_set_id); let hub = A::hub(self); - let query_set_guard = hub.query_sets.read(); - let query_set = { - let query_set = query_set_guard.get(query_set_id).unwrap(); - query_set - }; + if let Some(query_set) = hub.query_sets.unregister(query_set_id) { + let device = &query_set.device; - let device = &query_set.device; + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::DestroyQuerySet(query_set_id)); + } - #[cfg(feature = "trace")] - if let Some(ref mut trace) = *device.trace.lock() { - trace.add(trace::Action::DestroyQuerySet(query_set_id)); + device + .lock_life() + .suspected_resources + .query_sets + .insert(query_set_id, query_set.clone()); } - - device - .lock_life() - .suspected_resources - .query_sets - .insert(query_set_id, query_set.clone()); } pub fn query_set_label(&self, id: id::QuerySetId) -> String { @@ -1665,7 +1517,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.render_pipelines.prepare(id_in); + let fid = hub.render_pipelines.prepare::(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let error = loop { @@ -1723,7 +1575,10 @@ impl Global { Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(id) => id.as_info().id(), + Some(bg) => hub + .bind_group_layouts + .prepare::(id_in) + .assign_existing(bg), None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; @@ -1732,7 +1587,7 @@ impl Global { let id = hub .bind_group_layouts - 
.prepare(id_in) + .prepare::(id_in) .assign_error(""); (id, Some(error)) } @@ -1749,29 +1604,20 @@ impl Global { ); let hub = A::hub(self); - let (pipeline, layout) = { - let mut pipeline_guard = hub.render_pipelines.write(); - match pipeline_guard.get(render_pipeline_id) { - Ok(pipeline) => (pipeline.clone(), pipeline.layout.clone()), - Err(_) => { - hub.render_pipelines - .unregister_locked(render_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - let layout_id = layout.as_info().id(); - let device = &pipeline.device; - let mut life_lock = device.lock_life(); - life_lock - .suspected_resources - .render_pipelines - .insert(render_pipeline_id, pipeline.clone()); + if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) { + let layout_id = pipeline.layout.as_info().id(); + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .render_pipelines + .insert(render_pipeline_id, pipeline.clone()); - life_lock - .suspected_resources - .pipeline_layouts - .insert(layout_id, layout); + life_lock + .suspected_resources + .pipeline_layouts + .insert(layout_id, pipeline.layout.clone()); + } } pub fn device_create_compute_pipeline( @@ -1788,7 +1634,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.compute_pipelines.prepare(id_in); + let fid = hub.compute_pipelines.prepare::(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let error = loop { @@ -1846,17 +1692,20 @@ impl Global { Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; - let layout = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(id) => id, + let id = match pipeline.layout.bind_group_layouts.get(index as usize) { + Some(bg) => hub + .bind_group_layouts + .prepare::(id_in) + .assign_existing(bg), None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; - return (layout.as_info().id(), None); + return (id, None); }; let id = hub .bind_group_layouts - .prepare(id_in) + .prepare::(id_in) .assign_error(""); (id, Some(error)) } @@ -1873,28 +1722,19 @@ impl Global { ); let hub = A::hub(self); - let (pipeline, layout) = { - let mut pipeline_guard = hub.compute_pipelines.write(); - match pipeline_guard.get(compute_pipeline_id) { - Ok(pipeline) => (pipeline.clone(), pipeline.layout.clone()), - Err(_) => { - hub.compute_pipelines - .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - let device = &pipeline.device; - let mut life_lock = device.lock_life(); - life_lock - .suspected_resources - .compute_pipelines - .insert(compute_pipeline_id, pipeline.clone()); - let layout_id = layout.as_info().id(); - life_lock - .suspected_resources - .pipeline_layouts - .insert(layout_id, layout); + if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { + let layout_id = pipeline.layout.as_info().id(); + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .compute_pipelines + .insert(compute_pipeline_id, pipeline.clone()); + life_lock + .suspected_resources + .pipeline_layouts + .insert(layout_id, pipeline.layout.clone()); + } } pub fn surface_configure( @@ -2405,12 +2245,9 @@ impl Global { { let mut trackers = buffer.device.as_ref().trackers.lock(); - let buffer_guard = hub.buffers.read(); - trackers - .buffers - .set_single(&*buffer_guard, buffer_id, internal_use); + trackers.buffers.set_single(&buffer, internal_use); //TODO: Check if draining ALL 
buffers is correct! - trackers.buffers.drain(); + let _ = trackers.buffers.drain_transitions(); } buffer @@ -2487,114 +2324,6 @@ impl Global { } } } - - fn buffer_unmap_inner( - &self, - buffer_id: id::BufferId, - buffer: &Arc>, - device: &Device, - ) -> Result, BufferAccessError> { - log::debug!("Buffer {:?} map state -> Idle", buffer_id); - match mem::replace( - &mut *buffer.map_state.lock(), - resource::BufferMapState::Idle, - ) { - resource::BufferMapState::Init { - ptr, - stage_buffer, - needs_flush, - } => { - #[cfg(feature = "trace")] - if let Some(ref mut trace) = *device.trace.lock() { - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: 0..buffer.size, - queued: true, - }); - } - let _ = ptr; - if needs_flush { - unsafe { - device - .raw() - .flush_mapped_ranges(stage_buffer.raw(), iter::once(0..buffer.size)); - } - } - - let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; - - buffer - .info - .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); - let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { - src_offset: 0, - dst_offset: 0, - size, - }); - let transition_src = hal::BufferBarrier { - buffer: stage_buffer.raw(), - usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, - }; - let transition_dst = hal::BufferBarrier { - buffer: raw_buf, - usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, - }; - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - let encoder = pending_writes.activate(); - unsafe { - encoder.transition_buffers( - iter::once(transition_src).chain(iter::once(transition_dst)), - ); - if buffer.size > 0 { - encoder.copy_buffer_to_buffer( - stage_buffer.raw(), - raw_buf, - region.into_iter(), - ); - } - } - pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer)); - pending_writes.dst_buffers.insert(buffer_id, buffer.clone()); - } - resource::BufferMapState::Idle => { - return Err(BufferAccessError::NotMapped); - } - resource::BufferMapState::Waiting(pending) => { - return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); - } - resource::BufferMapState::Active { ptr, range, host } => { - if host == HostMap::Write { - #[cfg(feature = "trace")] - if let Some(ref mut trace) = *device.trace.lock() { - let size = range.end - range.start; - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: range.clone(), - queued: false, - }); - } - let _ = (ptr, range); - } - unsafe { - device - .raw() - .unmap_buffer(buffer.raw()) - .map_err(DeviceError::from)? 
- }; - } - } - Ok(None) - } - pub fn buffer_unmap(&self, buffer_id: id::BufferId) -> BufferAccessResult { profiling::scope!("unmap", "Buffer"); @@ -2607,9 +2336,8 @@ impl Global { .buffers .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; - let device = &buffer.device; - closure = self.buffer_unmap_inner(buffer_id, &buffer, device) + closure = buffer.buffer_unmap_inner() } // Note: outside the scope where locks are held when calling the callback diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index cc9c6b9a02..a3dd69cf66 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -10,7 +10,6 @@ use crate::{ hal_api::HalApi, hub::Hub, id::{self}, - identity::GlobalIdentityHandlerFactory, pipeline::{ComputePipeline, RenderPipeline}, resource::{self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView}, track::Tracker, @@ -466,54 +465,52 @@ impl LifetimeTracker { } impl LifetimeTracker { - fn triage_suspected_render_bundles( + fn triage_suspected_render_bundles( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::RenderBundleId), { self.suspected_resources .render_bundles - .retain(|bundle_id, bundle| { - let id = bundle.info.id(); + .retain(|&bundle_id, bundle| { let is_removed = { let mut trackers = trackers.lock(); - trackers.bundles.remove_abandoned(id) + trackers + .bundles + .remove_abandoned(bundle_id, hub.render_bundles.contains(bundle_id)) }; if is_removed { - log::info!("Bundle {:?} is removed from registry", id); - f(bundle_id); - - if let Ok(res) = hub.render_bundles.get(id) { - for v in res.used.buffers.used_resources() { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.textures.used_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.bind_groups.used_resources() { - self.suspected_resources - .bind_groups - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.render_pipelines.used_resources() { - self.suspected_resources - .render_pipelines - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.query_sets.used_resources() { - self.suspected_resources - .query_sets - .insert(v.as_info().id(), v.clone()); - } + log::info!("Bundle {:?} is not tracked anymore", bundle_id); + f(&bundle_id); + + for v in bundle.used.buffers.used_resources() { + self.suspected_resources + .buffers + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.bind_groups.used_resources() { + self.suspected_resources + .bind_groups + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.render_pipelines.used_resources() { + self.suspected_resources + .render_pipelines + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.query_sets.used_resources() { + self.suspected_resources + .query_sets + .insert(v.as_info().id(), v.clone()); } } !is_removed @@ -521,266 +518,258 @@ impl LifetimeTracker { self } - fn triage_suspected_bind_groups( + fn triage_suspected_bind_groups( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::BindGroupId), { self.suspected_resources .bind_groups - .retain(|bind_group_id, bind_group| { - let id = bind_group.info.id(); + .retain(|&bind_group_id, bind_group| { let 
is_removed = { let mut trackers = trackers.lock(); - trackers.bind_groups.remove_abandoned(id) + trackers + .bind_groups + .remove_abandoned(bind_group_id, hub.bind_groups.contains(bind_group_id)) }; if is_removed { - log::info!("BindGroup {:?} is removed from registry", id); - f(bind_group_id); - - if let Ok(res) = hub.bind_groups.get(id) { - for v in res.used.buffers.used_resources() { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.textures.used_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.views.used_resources() { - self.suspected_resources - .texture_views - .insert(v.as_info().id(), v.clone()); - } - for v in res.used.samplers.used_resources() { - self.suspected_resources - .samplers - .insert(v.as_info().id(), v.clone()); - } + log::info!("BindGroup {:?} is not tracked anymore", bind_group_id); + f(&bind_group_id); + for v in bind_group.used.buffers.used_resources() { self.suspected_resources - .bind_group_layouts - .insert(res.layout.as_info().id(), res.layout.clone()); - - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .bind_groups - .push(res); + .buffers + .insert(v.as_info().id(), v.clone()); + } + for v in bind_group.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id(), v.clone()); } + for v in bind_group.used.views.used_resources() { + self.suspected_resources + .texture_views + .insert(v.as_info().id(), v.clone()); + } + for v in bind_group.used.samplers.used_resources() { + self.suspected_resources + .samplers + .insert(v.as_info().id(), v.clone()); + } + + self.suspected_resources + .bind_group_layouts + .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + + let submit_index = bind_group.info.submission_index(); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .bind_groups + .push(bind_group.clone()); } !is_removed }); self } - fn triage_suspected_texture_views( + fn triage_suspected_texture_views( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::TextureViewId), { self.suspected_resources .texture_views - .retain(|view_id, view| { - let id = view.info.id(); + .retain(|&view_id, view| { let is_removed = { let mut trackers = trackers.lock(); - trackers.views.remove_abandoned(id) + trackers + .views + .remove_abandoned(view_id, hub.texture_views.contains(view_id)) }; if is_removed { - log::info!("TextureView {:?} is removed from registry", id); - f(view_id); - - if let Ok(res) = hub.texture_views.get(id) { - if let Some(parent_texture) = res.parent.as_ref() { - self.suspected_resources - .textures - .insert(parent_texture.as_info().id(), parent_texture.clone()); - } - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .texture_views - .push(res); + log::info!("TextureView {:?} is not tracked anymore", view_id); + f(&view_id); + + if let Some(parent_texture) = view.parent.as_ref() { + self.suspected_resources + .textures + .insert(parent_texture.as_info().id(), parent_texture.clone()); } + let submit_index = view.info.submission_index(); + self.active + .iter_mut() + 
.find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .texture_views + .push(view.clone()); } !is_removed }); self } - fn triage_suspected_textures( + fn triage_suspected_textures( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::TextureId), { self.suspected_resources .textures - .retain(|texture_id, texture| { - let id = texture.info.id(); + .retain(|&texture_id, texture| { let is_removed = { let mut trackers = trackers.lock(); - trackers.textures.remove_abandoned(id) + trackers + .textures + .remove_abandoned(texture_id, hub.textures.contains(texture_id)) }; if is_removed { - log::info!("Texture {:?} is removed from registry", id); - f(texture_id); - - if let Ok(res) = hub.textures.get(id) { - let submit_index = res.info.submission_index(); - let non_referenced_resources = self - .active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources); - - if let &resource::TextureClearMode::RenderPass { - ref clear_views, .. - } = &*res.clear_mode.read() - { - non_referenced_resources - .texture_views - .extend(clear_views.iter().cloned()); - } - non_referenced_resources.textures.push(res); + log::info!("Texture {:?} is not tracked anymore", texture_id); + f(&texture_id); + + let submit_index = texture.info.submission_index(); + let non_referenced_resources = self + .active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources); + + if let &resource::TextureClearMode::RenderPass { + ref clear_views, .. + } = &*texture.clear_mode.read() + { + non_referenced_resources + .texture_views + .extend(clear_views.iter().cloned()); } + non_referenced_resources.textures.push(texture.clone()); } !is_removed }); self } - fn triage_suspected_samplers( + fn triage_suspected_samplers( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::SamplerId), { self.suspected_resources .samplers - .retain(|sampler_id, sampler| { - let id = sampler.info.id(); + .retain(|&sampler_id, sampler| { let is_removed = { let mut trackers = trackers.lock(); - trackers.samplers.remove_abandoned(id) + trackers + .samplers + .remove_abandoned(sampler_id, hub.samplers.contains(sampler_id)) }; if is_removed { - log::info!("Sampler {:?} is removed from registry", id); - f(sampler_id); - - if let Ok(res) = hub.samplers.get(id) { - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .samplers - .push(res); - } + log::info!("Sampler {:?} is not tracked anymore", sampler_id); + f(&sampler_id); + + let submit_index = sampler.info.submission_index(); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .samplers + .push(sampler.clone()); } !is_removed }); self } - fn triage_suspected_buffers( + fn triage_suspected_buffers( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::BufferId), { self.suspected_resources .buffers - .retain(|buffer_id, buffer| { - let id = buffer.info.id(); + .retain(|&buffer_id, buffer| { let is_removed = { let mut trackers = trackers.lock(); - trackers.buffers.remove_abandoned(id) + trackers + .buffers + 
.remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)) }; if is_removed { - log::info!("Buffer {:?} is removed from registry", id); - f(buffer_id); - - if let Ok(res) = hub.buffers.get(id) { - let submit_index = res.info.submission_index(); - if let resource::BufferMapState::Init { - ref stage_buffer, .. - } = *res.map_state.lock() - { - self.free_resources.buffers.push(stage_buffer.clone()); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .buffers - .push(res); + log::info!("Buffer {:?} is not tracked anymore", buffer_id); + f(&buffer_id); + + let submit_index = buffer.info.submission_index(); + if let resource::BufferMapState::Init { + ref stage_buffer, .. + } = *buffer.map_state.lock() + { + self.free_resources.buffers.push(stage_buffer.clone()); } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .buffers + .push(buffer.clone()); } !is_removed }); self } - fn triage_suspected_compute_pipelines( + fn triage_suspected_compute_pipelines( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::ComputePipelineId), { self.suspected_resources.compute_pipelines.retain( - |compute_pipeline_id, compute_pipeline| { - let id = compute_pipeline.info.id(); + |&compute_pipeline_id, compute_pipeline| { let is_removed = { let mut trackers = trackers.lock(); - trackers.compute_pipelines.remove_abandoned(id) + trackers.compute_pipelines.remove_abandoned( + compute_pipeline_id, + hub.compute_pipelines.contains(compute_pipeline_id), + ) }; if is_removed { - log::info!("ComputePipeline {:?} is removed from registry", id); - f(compute_pipeline_id); - - if let Ok(res) = hub.compute_pipelines.get(id) { - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .compute_pipes - .push(res); - } + log::info!( + "ComputePipeline {:?} is not tracked anymore", + compute_pipeline_id + ); + f(&compute_pipeline_id); + + let submit_index = compute_pipeline.info.submission_index(); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .compute_pipes + .push(compute_pipeline.clone()); } !is_removed }, @@ -788,69 +777,65 @@ impl LifetimeTracker { self } - fn triage_suspected_render_pipelines( + fn triage_suspected_render_pipelines( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, mut f: F, ) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::RenderPipelineId), { self.suspected_resources .render_pipelines - .retain(|render_pipeline_id, render_pipeline| { - let id = render_pipeline.info.id(); + .retain(|&render_pipeline_id, render_pipeline| { let is_removed = { let mut trackers = trackers.lock(); - trackers.render_pipelines.remove_abandoned(id) + trackers.render_pipelines.remove_abandoned( + render_pipeline_id, + hub.render_pipelines.contains(render_pipeline_id), + ) }; if is_removed { - log::info!("RenderPipeline {:?} is removed from registry", id); - f(render_pipeline_id); - - if let Ok(res) = hub.render_pipelines.get(id) { - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .render_pipes - .push(res); - } + log::info!( + 
"RenderPipeline {:?} is not tracked anymore", + render_pipeline_id + ); + f(&render_pipeline_id); + + let submit_index = render_pipeline.info.submission_index(); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .render_pipes + .push(render_pipeline.clone()); } !is_removed }); self } - fn triage_suspected_pipeline_layouts(&mut self, hub: &Hub, mut f: F) -> &mut Self + fn triage_suspected_pipeline_layouts(&mut self, mut f: F) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::PipelineLayoutId), { - let mut pipeline_layouts_locked = hub.pipeline_layouts.write(); self.suspected_resources .pipeline_layouts .retain(|pipeline_layout_id, pipeline_layout| { - let id = pipeline_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed - if pipeline_layouts_locked.is_unique(id).unwrap() { - log::debug!("PipelineLayout {:?} will be removed from registry", id); + if pipeline_layout.is_unique() { f(pipeline_layout_id); - if let Some(lay) = hub - .pipeline_layouts - .unregister_locked(id, &mut *pipeline_layouts_locked) - { - for bgl in &lay.bind_group_layouts { - self.suspected_resources - .bind_group_layouts - .insert(bgl.as_info().id(), bgl.clone()); - } - self.free_resources.pipeline_layouts.push(lay); + for bgl in &pipeline_layout.bind_group_layouts { + self.suspected_resources + .bind_group_layouts + .insert(bgl.as_info().id(), bgl.clone()); } + self.free_resources + .pipeline_layouts + .push(pipeline_layout.clone()); + return false; } true @@ -858,23 +843,19 @@ impl LifetimeTracker { self } - fn triage_suspected_bind_group_layouts(&mut self, hub: &Hub, mut f: F) -> &mut Self + fn triage_suspected_bind_group_layouts(&mut self, mut f: F) -> &mut Self where - G: GlobalIdentityHandlerFactory, F: FnMut(&id::BindGroupLayoutId), { - let mut bind_group_layouts_locked = hub.bind_group_layouts.write(); - self.suspected_resources.bind_group_layouts.retain( |bind_group_layout_id, bind_group_layout| { - let id = bind_group_layout.info.id(); //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. //Note: this has to happen after all the suspected pipelines are destroyed - if bind_group_layouts_locked.is_unique(id).unwrap() { + if bind_group_layout.is_unique() { // If This layout points to a compatible one, go over the latter // to decrement the ref count and potentially destroy it. 
//bgl_to_check = bind_group_layout.compatible_layout; @@ -885,12 +866,10 @@ impl LifetimeTracker { ); f(bind_group_layout_id); - if let Some(lay) = hub + self.free_resources .bind_group_layouts - .unregister_locked(*bind_group_layout_id, &mut *bind_group_layouts_locked) - { - self.free_resources.bind_group_layouts.push(lay); - } + .push(bind_group_layout.clone()); + return false; } true @@ -899,35 +878,32 @@ impl LifetimeTracker { self } - fn triage_suspected_query_sets( + fn triage_suspected_query_sets( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, - ) -> &mut Self - where - G: GlobalIdentityHandlerFactory, - { + ) -> &mut Self { self.suspected_resources .query_sets - .retain(|_query_set_id, query_set| { - let id = query_set.info.id(); + .retain(|&query_set_id, query_set| { let is_removed = { let mut trackers = trackers.lock(); - trackers.query_sets.remove_abandoned(id) + trackers + .query_sets + .remove_abandoned(query_set_id, hub.query_sets.contains(query_set_id)) }; if is_removed { - log::info!("QuerySet {:?} is removed from registry", id); + log::info!("QuerySet {:?} is not tracked anymore", query_set_id); // #[cfg(feature = "trace")] // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); - if let Ok(res) = hub.query_sets.get(id) { - let submit_index = res.info.submission_index(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .query_sets - .push(res); - } + + let submit_index = query_set.info.submission_index(); + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .query_sets + .push(query_set.clone()); } !is_removed }); @@ -973,9 +949,9 @@ impl LifetimeTracker { /// [`self.active`]: LifetimeTracker::active /// [`triage_submissions`]: LifetimeTracker::triage_submissions /// [`self.free_resources`]: LifetimeTracker::free_resources - pub(crate) fn triage_suspected( + pub(crate) fn triage_suspected( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, #[cfg(feature = "trace")] mut trace: Option<&mut trace::Trace>, ) { @@ -1029,13 +1005,13 @@ impl LifetimeTracker { t.add(trace::Action::DestroyRenderPipeline(*_id)); } }); - self.triage_suspected_pipeline_layouts(hub, |_id| { + self.triage_suspected_pipeline_layouts(|_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyPipelineLayout(*_id)); } }); - self.triage_suspected_bind_group_layouts(hub, |_id| { + self.triage_suspected_bind_group_layouts(|_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroupLayout(*_id)); @@ -1076,9 +1052,9 @@ impl LifetimeTracker { /// /// See the documentation for [`LifetimeTracker`] for details. 
#[must_use] - pub(crate) fn handle_mapping( + pub(crate) fn handle_mapping( &mut self, - hub: &Hub, + hub: &Hub, raw: &A::Device, trackers: &Mutex>, ) -> Vec { @@ -1092,14 +1068,14 @@ impl LifetimeTracker { let buffer_id = buffer.info.id(); let is_removed = { let mut trackers = trackers.lock(); - trackers.buffers.remove_abandoned(buffer_id) + trackers + .buffers + .remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)) }; if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; - log::info!("Buffer {:?} is removed from registry", buffer_id); - if let Ok(buf) = hub.buffers.get(buffer_id) { - self.free_resources.buffers.push(buf); - } + log::info!("Buffer {:?} is not tracked anymore", buffer_id); + self.free_resources.buffers.push(buffer.clone()); } else { let mapping = match std::mem::replace( &mut *buffer.map_state.lock(), diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 10807833f9..ee657eaa81 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -342,13 +342,13 @@ pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> { } impl ImplicitPipelineIds<'_, G> { - fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { + fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { ImplicitPipelineContext { - root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(), + root_id: hub.pipeline_layouts.prepare::(self.root_id).into_id(), group_ids: self .group_ids .iter() - .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id()) + .map(|id_in| hub.bind_group_layouts.prepare::(*id_in).into_id()) .collect(), } } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index cb9d1fb85e..46908b9f27 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -471,7 +471,7 @@ impl Global { let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(device, buffer_size.get())?; - let fid = hub.staging_buffers.prepare(id_in); + let fid = hub.staging_buffers.prepare::(id_in); let (id, _) = fid.assign(staging_buffer); log::info!("Created StagingBuffer {:?}", id); @@ -589,10 +589,13 @@ impl Global { let (dst, transition) = { let buffer_guard = hub.buffers.read(); + let dst = buffer_guard + .get(buffer_id) + .map_err(|_| TransferError::InvalidBuffer(buffer_id))?; let mut trackers = device.trackers.lock(); trackers .buffers - .set_single(&buffer_guard, buffer_id, hal::BufferUses::COPY_DST) + .set_single(dst, hal::BufferUses::COPY_DST) .ok_or(TransferError::InvalidBuffer(buffer_id))? 
}; let dst_raw = dst @@ -774,10 +777,8 @@ impl Global { .collect::>>() { let mut trackers = device.trackers.lock(); - let texture_guard = hub.textures.read(); crate::command::clear_texture( - &*texture_guard, - destination.texture, + &dst, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, @@ -883,12 +884,7 @@ impl Global { let mut trackers = device.trackers.lock(); let transition = trackers .textures - .set_single( - &dst, - destination.texture, - selector, - hal::TextureUses::COPY_DST, - ) + .set_single(&dst, selector, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; unsafe { encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); @@ -1245,14 +1241,8 @@ impl Global { } if should_extend { unsafe { - let texture_guard = hub.textures.read(); used_surface_textures - .merge_single( - &*texture_guard, - id, - None, - hal::TextureUses::PRESENT, - ) + .merge_single(texture, None, hal::TextureUses::PRESENT) .unwrap(); }; } @@ -1268,19 +1258,16 @@ impl Global { } } { - let texture_view_guard = hub.texture_views.read(); - let sampler_guard = hub.samplers.read(); - for bg in cmd_buf_trackers.bind_groups.used_resources() { bg.info.use_at(submit_index); // We need to update the submission indices for the contained // state-less (!) resources as well, so that they don't get // deleted too early if the parent bind group goes out of scope. - for sub_id in bg.used.views.used() { - texture_view_guard[sub_id].info.use_at(submit_index); + for view in bg.used.views.used_resources() { + view.info.use_at(submit_index); } - for sub_id in bg.used.samplers.used() { - sampler_guard[sub_id].info.use_at(submit_index); + for sampler in bg.used.samplers.used_resources() { + sampler.info.use_at(submit_index); } if bg.is_unique() { device @@ -1354,15 +1341,14 @@ impl Global { .map_err(DeviceError::from)? }; log::trace!("Stitching command buffer {:?} before submission", cmb_id); - let buffer_guard = hub.buffers.read(); - let texture_guard = hub.textures.read(); + //Note: locking the trackers has to be done after the storages let mut trackers = device.trackers.lock(); baked - .initialize_buffer_memory(&mut *trackers, &*buffer_guard) + .initialize_buffer_memory(&mut *trackers) .map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?; baked - .initialize_texture_memory(&mut *trackers, &*texture_guard, device) + .initialize_texture_memory(&mut *trackers, device) .map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?; //Note: stateless trackers are not merged: // device already knows these resources exist. 
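The hunks above and below all make the same mechanical change from this Arcanization pass: tracker methods such as `set_single` and `merge_single` no longer take a registry read guard plus an id and perform the lookup themselves; the caller resolves the `Arc` to the resource first and passes it in, so the tracker only records the reference and the requested usage. Below is a minimal sketch of that shape; `Buffer`, `BufferUses`, and `BufferTracker` here are simplified stand-ins for illustration, not the real wgpu-core definitions.

use std::sync::Arc;

// Illustrative stand-ins; the real wgpu-core types carry far more state.
struct Buffer {
    label: String,
}

#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum BufferUses {
    CopySrc,
    CopyDst,
}

#[derive(Default)]
struct BufferTracker {
    tracked: Vec<(Arc<Buffer>, BufferUses)>,
}

impl BufferTracker {
    // After the change: the caller has already turned the id into an `Arc<Buffer>`
    // (and reported `InvalidBuffer` if that failed), so the tracker just records
    // the reference and the usage transition it implies.
    fn set_single(&mut self, buffer: &Arc<Buffer>, usage: BufferUses) -> Option<()> {
        self.tracked.push((Arc::clone(buffer), usage));
        Some(())
    }
}

fn main() {
    let dst = Arc::new(Buffer {
        label: "copy destination".to_string(),
    });
    let mut tracker = BufferTracker::default();
    // Caller-side pattern mirrored from the surrounding hunks: resolve first, then track.
    tracker
        .set_single(&dst, BufferUses::CopyDst)
        .expect("buffer could not be tracked");
    println!("tracking {:?} as {:?}", dst.label, BufferUses::CopyDst);
}

As the surrounding hunks suggest, the point of the change is that the per-resource `Arc`, rather than an id re-looked-up under a registry lock, is what keeps the resource alive and identifies it to the trackers.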
@@ -1370,8 +1356,6 @@ impl Global { &mut baked.encoder, &mut *trackers, &baked.trackers, - &*buffer_guard, - &*texture_guard, ); let transit = unsafe { baked.encoder.end_encoding().unwrap() }; @@ -1389,11 +1373,8 @@ impl Global { }; trackers .textures - .set_from_usage_scope(&*texture_guard, &used_surface_textures); - let texture_barriers = trackers.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + .set_from_usage_scope(&used_surface_textures); + let texture_barriers = trackers.textures.drain_transitions(); let present = unsafe { baked.encoder.transition_textures(texture_barriers); baked.encoder.end_encoding().unwrap() @@ -1430,12 +1411,7 @@ impl Global { has_work.store(true, Ordering::Relaxed); unsafe { used_surface_textures - .merge_single( - &*texture_guard, - id, - None, - hal::TextureUses::PRESENT, - ) + .merge_single(texture, None, hal::TextureUses::PRESENT) .unwrap() }; } @@ -1447,11 +1423,8 @@ impl Global { trackers .textures - .set_from_usage_scope(&*texture_guard, &used_surface_textures); - let texture_barriers = trackers.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + .set_from_usage_scope(&used_surface_textures); + let texture_barriers = trackers.textures.drain_transitions(); unsafe { pending_writes diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 9666ba4774..3b8d51cda5 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -15,16 +15,16 @@ use crate::{ hal_api::HalApi, hub::Hub, id::{self, DeviceId, QueueId}, - identity::GlobalIdentityHandlerFactory, init_tracker::{ BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, TextureInitTracker, TextureInitTrackerAction, }, instance::Adapter, pipeline, + registry::Registry, resource::ResourceInfo, resource::{ - self, Buffer, QuerySet, Resource, Sampler, Texture, TextureInner, TextureView, + self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView, TextureViewNotRenderableReason, }, storage::Storage, @@ -284,9 +284,9 @@ impl Device { /// submissions still in flight. (We have to take the locks needed to /// produce this information for other reasons, so we might as well just /// return it to our callers.) 
- pub(crate) fn maintain<'this, G: GlobalIdentityHandlerFactory>( + pub(crate) fn maintain<'this>( &'this self, - hub: &Hub, + hub: &Hub, fence: &A::Fence, maintain: wgt::Maintain, ) -> Result<(UserClosures, bool), WaitIdleError> { @@ -764,31 +764,34 @@ impl Device { Ok(texture) } - pub(crate) fn create_texture_inner_view( + pub(crate) fn create_texture_view( self: &Arc, - texture: &A::Texture, - texture_id: id::TextureId, - texture_desc: &wgt::TextureDescriptor<(), Vec>, - texture_usage: &hal::TextureUses, - texture_format: &wgt::TextureFormatFeatures, + texture: &Arc>, desc: &resource::TextureViewDescriptor, ) -> Result, resource::CreateTextureViewError> { + let texture_raw = texture + .inner + .as_ref() + .unwrap() + .as_raw() + .ok_or(resource::CreateTextureViewError::InvalidTexture)?; // resolve TextureViewDescriptor defaults // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults let resolved_format = desc.format.unwrap_or_else(|| { - texture_desc + texture + .desc .format .aspect_specific_format(desc.range.aspect) - .unwrap_or(texture_desc.format) + .unwrap_or(texture.desc.format) }); let resolved_dimension = desc .dimension - .unwrap_or_else(|| match texture_desc.dimension { + .unwrap_or_else(|| match texture.desc.dimension { wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, wgt::TextureDimension::D2 => { - if texture_desc.array_layer_count() == 1 { + if texture.desc.array_layer_count() == 1 { wgt::TextureViewDimension::D2 } else { wgt::TextureViewDimension::D2Array @@ -798,7 +801,8 @@ impl Device { }); let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| { - texture_desc + texture + .desc .mip_level_count .saturating_sub(desc.range.base_mip_level) }); @@ -812,7 +816,8 @@ impl Device { | wgt::TextureViewDimension::D3 => 1, wgt::TextureViewDimension::Cube => 6, wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => { - texture_desc + texture + .desc .array_layer_count() .saturating_sub(desc.range.base_array_layer) } @@ -820,32 +825,33 @@ impl Device { // validate TextureViewDescriptor - let aspects = hal::FormatAspects::new(texture_desc.format, desc.range.aspect); + let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect); if aspects.is_empty() { return Err(resource::CreateTextureViewError::InvalidAspect { - texture_format: texture_desc.format, + texture_format: texture.desc.format, requested_aspect: desc.range.aspect, }); } let format_is_good = if desc.range.aspect == wgt::TextureAspect::All { - resolved_format == texture_desc.format - || texture_desc.view_formats.contains(&resolved_format) + resolved_format == texture.desc.format + || texture.desc.view_formats.contains(&resolved_format) } else { Some(resolved_format) - == texture_desc + == texture + .desc .format .aspect_specific_format(desc.range.aspect) }; if !format_is_good { return Err(resource::CreateTextureViewError::FormatReinterpretation { - texture: texture_desc.format, + texture: texture.desc.format, view: resolved_format, }); } // check if multisampled texture is seen as anything but 2D - if texture_desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { + if texture.desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 { return Err( resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension( resolved_dimension, @@ -854,11 +860,11 @@ impl Device { } // check if the dimension is compatible with the texture - if texture_desc.dimension != 
resolved_dimension.compatible_texture_dimension() { + if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() { return Err( resource::CreateTextureViewError::InvalidTextureViewDimension { view: resolved_dimension, - texture: texture_desc.dimension, + texture: texture.desc.dimension, }, ); } @@ -895,7 +901,7 @@ impl Device { match resolved_dimension { TextureViewDimension::Cube | TextureViewDimension::CubeArray => { - if texture_desc.size.width != texture_desc.size.height { + if texture.desc.size.width != texture.desc.size.height { return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize); } } @@ -911,7 +917,7 @@ impl Device { .base_mip_level .saturating_add(resolved_mip_level_count); - let level_end = texture_desc.mip_level_count; + let level_end = texture.desc.mip_level_count; if mip_level_end > level_end { return Err(resource::CreateTextureViewError::TooManyMipLevels { requested: mip_level_end, @@ -928,7 +934,7 @@ impl Device { .base_array_layer .saturating_add(resolved_array_layer_count); - let layer_end = texture_desc.array_layer_count(); + let layer_end = texture.desc.array_layer_count(); if array_layer_end > layer_end { return Err(resource::CreateTextureViewError::TooManyArrayLayers { requested: array_layer_end, @@ -938,11 +944,12 @@ impl Device { // https://gpuweb.github.io/gpuweb/#abstract-opdef-renderable-texture-view let render_extent = 'b: loop { - if !texture_desc + if !texture + .desc .usage .contains(wgt::TextureUsages::RENDER_ATTACHMENT) { - break 'b Err(TextureViewNotRenderableReason::Usage(texture_desc.usage)); + break 'b Err(TextureViewNotRenderableReason::Usage(texture.desc.usage)); } if !(resolved_dimension == TextureViewDimension::D2 @@ -968,11 +975,13 @@ impl Device { )); } - if aspects != hal::FormatAspects::from(texture_desc.format) { + if aspects != hal::FormatAspects::from(texture.desc.format) { break 'b Err(TextureViewNotRenderableReason::Aspects(aspects)); } - break 'b Ok(texture_desc.compute_render_extent(desc.range.base_mip_level)); + break 'b Ok(texture + .desc + .compute_render_extent(desc.range.base_mip_level)); }; // filter the usages based on the other criteria @@ -994,18 +1003,18 @@ impl Device { } else { hal::TextureUses::RESOURCE }; - *texture_usage & mask_copy & mask_dimension & mask_mip_level + texture.hal_usage & mask_copy & mask_dimension & mask_mip_level }; log::debug!( "Create view for texture {:?} filters usages to {:?}", - texture_id, + texture.as_info().id(), usage ); // use the combined depth-stencil format for the view - let format = if resolved_format.is_depth_stencil_component(texture_desc.format) { - texture_desc.format + let format = if resolved_format.is_depth_stencil_component(texture.desc.format) { + texture.desc.format } else { resolved_format }; @@ -1030,7 +1039,7 @@ impl Device { self.raw .as_ref() .unwrap() - .create_texture_view(texture, &hal_desc) + .create_texture_view(texture_raw, &hal_desc) .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
}; @@ -1041,51 +1050,21 @@ impl Device { Ok(TextureView { raw: Some(raw), - parent: None, - parent_id: texture_id, + parent: Some(texture.clone()), device: self.clone(), desc: resource::HalTextureViewDescriptor { format: resolved_format, dimension: resolved_dimension, range: resolved_range, }, - format_features: *texture_format, + format_features: texture.format_features, render_extent, - samples: texture_desc.sample_count, + samples: texture.desc.sample_count, selector, info: ResourceInfo::new(desc.label.borrow_or_default()), }) } - pub(crate) fn create_texture_view( - self: &Arc, - texture: &Arc>, - texture_id: id::TextureId, - desc: &resource::TextureViewDescriptor, - ) -> Result, resource::CreateTextureViewError> { - let texture_raw = texture - .inner - .as_ref() - .unwrap() - .as_raw() - .ok_or(resource::CreateTextureViewError::InvalidTexture)?; - - let mut result = self.create_texture_inner_view( - texture_raw, - texture_id, - &texture.desc, - &texture.hal_usage, - &texture.format_features, - desc, - ); - if let TextureInner::Native { .. } = *texture.inner.as_ref().unwrap() { - if let Ok(ref mut texture_view) = result { - texture_view.parent = Some(texture.clone()); - } - } - result - } - pub(crate) fn create_sampler( self: &Arc, desc: &resource::SamplerDescriptor, @@ -1644,7 +1623,7 @@ impl Device { bb: &binding_model::BufferBinding, binding: u32, decl: &wgt::BindGroupLayoutEntry, - used_buffer_ranges: &mut Vec, + used_buffer_ranges: &mut Vec>, dynamic_binding_info: &mut Vec, late_buffer_binding_sizes: &mut FastHashMap, used: &mut BindGroupStates, @@ -1755,7 +1734,7 @@ impl Device { assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); used_buffer_ranges.extend(buffer.initialization_status.read().create_action( - bb.buffer_id, + buffer, bb.offset..bb.offset + bind_size, MemoryInitKind::NeedsInitializedMemory, )); @@ -1769,29 +1748,28 @@ impl Device { pub(crate) fn create_texture_binding( view: &TextureView, - texture_guard: &Storage, id::TextureId>, internal_use: hal::TextureUses, pub_usage: wgt::TextureUsages, used: &mut BindGroupStates, - used_texture_ranges: &mut Vec, + used_texture_ranges: &mut Vec>, ) -> Result<(), binding_model::CreateBindGroupError> { + let texture_id = view.parent.as_ref().unwrap().as_info().id(); // Careful here: the texture may no longer have its own ref count, // if it was deleted by the user. 
let texture = used .textures .add_single( - texture_guard, - view.parent_id, + view.parent.as_ref().unwrap(), Some(view.selector.clone()), internal_use, ) .ok_or(binding_model::CreateBindGroupError::InvalidTexture( - view.parent_id, + texture_id, ))?; check_texture_usage(texture.desc.usage, pub_usage)?; used_texture_ranges.push(TextureInitTrackerAction { - id: view.parent_id, + texture: texture.clone(), range: TextureInitRange { mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), layer_range: view @@ -1805,11 +1783,11 @@ impl Device { Ok(()) } - pub(crate) fn create_bind_group( + pub(crate) fn create_bind_group( self: &Arc, layout: &Arc>, desc: &binding_model::BindGroupDescriptor, - hub: &Hub, + hub: &Hub, ) -> Result, binding_model::CreateBindGroupError> { use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; { @@ -1833,7 +1811,6 @@ impl Device { let mut used = BindGroupStates::new(); let buffer_guard = hub.buffers.read(); - let texture_guard = hub.textures.read(); let texture_view_guard = hub.texture_views.read(); let sampler_guard = hub.samplers.read(); @@ -1963,7 +1940,6 @@ impl Device { )?; Self::create_texture_binding( view, - &texture_guard, internal_use, pub_usage, &mut used, @@ -1991,7 +1967,6 @@ impl Device { "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; Self::create_texture_binding( view, - &texture_guard, internal_use, pub_usage, &mut used, @@ -2328,8 +2303,8 @@ impl Device { self: &Arc, implicit_context: Option, mut derived_group_layouts: ArrayVec, - bgl_guard: &mut Storage, id::BindGroupLayoutId>, - pipeline_layout_guard: &mut Storage, id::PipelineLayoutId>, + bgl_registry: &Registry>, + pipeline_layout_registry: &Registry>, ) -> Result { while derived_group_layouts .last() @@ -2349,15 +2324,20 @@ impl Device { } for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { - match Device::deduplicate_bind_group_layout(self.info.id(), &map, bgl_guard) { + let bgl = match Device::deduplicate_bind_group_layout( + self.info.id(), + &map, + &bgl_registry.read(), + ) { Some((dedup_id, _)) => { *bgl_id = dedup_id; + None } - None => { - let bgl = self.create_bind_group_layout(None, map)?; - bgl_guard.force_replace(*bgl_id, bgl); - } + None => Some(self.create_bind_group_layout(None, map)?), }; + if let Some(bgl) = bgl { + bgl_registry.force_replace(*bgl_id, bgl); + } } let layout_desc = binding_model::PipelineLayoutDescriptor { @@ -2365,25 +2345,23 @@ impl Device { bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]), push_constant_ranges: Cow::Borrowed(&[]), //TODO? }; - let layout = self.create_pipeline_layout(&layout_desc, bgl_guard)?; - pipeline_layout_guard.force_replace(ids.root_id, layout); + let layout = self.create_pipeline_layout(&layout_desc, &bgl_registry.read())?; + pipeline_layout_registry.force_replace(ids.root_id, layout); Ok(ids.root_id) } - pub(crate) fn create_compute_pipeline( + pub(crate) fn create_compute_pipeline( self: &Arc, desc: &pipeline::ComputePipelineDescriptor, implicit_context: Option, - hub: &Hub, + hub: &Hub, ) -> Result, pipeline::CreateComputePipelineError> { - //TODO: only lock mutable if the layout is derived - let mut pipeline_layout_guard = hub.pipeline_layouts.write(); - let mut bgl_guard = hub.bind_group_layouts.write(); - // This has to be done first, or otherwise the IDs may be pointing to entries // that are not even in the storage. 
if let Some(ref ids) = implicit_context { + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); + let mut bgl_guard = hub.bind_group_layouts.write(); for &bgl_id in ids.group_ids.iter() { bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); } @@ -2404,11 +2382,11 @@ impl Device { { let flag = wgt::ShaderStages::COMPUTE; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( pipeline_layout_guard .get(pipeline_layout_id) - .as_ref() .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?, )), None => { @@ -2436,10 +2414,11 @@ impl Device { None => self.derive_pipeline_layout( implicit_context, derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, + &hub.bind_group_layouts, + &hub.pipeline_layouts, )?, }; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let layout = pipeline_layout_guard .get(pipeline_layout_id) .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; @@ -2484,22 +2463,21 @@ impl Device { Ok(pipeline) } - pub(crate) fn create_render_pipeline( + pub(crate) fn create_render_pipeline( self: &Arc, adapter: &Adapter, desc: &pipeline::RenderPipelineDescriptor, implicit_context: Option, - hub: &Hub, + hub: &Hub, ) -> Result, pipeline::CreateRenderPipelineError> { use wgt::TextureFormatFeatureFlags as Tfff; - //TODO: only lock mutable if the layout is derived - let mut pipeline_layout_guard = hub.pipeline_layouts.write(); - let mut bgl_guard = hub.bind_group_layouts.write(); - // This has to be done first, or otherwise the IDs may be pointing to entries // that are not even in the storage. if let Some(ref ids) = implicit_context { + //TODO: only lock mutable if the layout is derived + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); for &bgl_id in ids.group_ids.iter() { bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); @@ -2762,6 +2740,7 @@ impl Device { } })?; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => { let pipeline_layout = pipeline_layout_guard @@ -2810,6 +2789,7 @@ impl Device { error: validation::StageError::InvalidModule, })?; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( pipeline_layout_guard @@ -2889,13 +2869,17 @@ impl Device { None => self.derive_pipeline_layout( implicit_context, derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, + &hub.bind_group_layouts, + &hub.pipeline_layouts, )?, }; - let layout = pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + let layout = { + let pipeline_layout_guard = hub.pipeline_layouts.read(); + pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)? 
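Both pipeline-creation paths now take the pipeline-layout and bind-group-layout write locks only inside the short block that pre-inserts error placeholders for implicit IDs, and re-acquire read locks later where a layout is actually fetched, instead of holding write() guards across the whole function. A minimal sketch of that narrowed lock-scope pattern with parking_lot::RwLock (the locking crate wgpu-core already uses); the registry here is just a Vec<String> stand-in:

    use parking_lot::RwLock;

    struct Registry {
        entries: RwLock<Vec<String>>,
    }

    fn create_pipeline(layouts: &Registry, needs_placeholder: bool) -> usize {
        if needs_placeholder {
            // The write lock lives only for this block and is released at the brace.
            let mut guard = layouts.entries.write();
            guard.push("<error placeholder>".to_string());
        }

        // Long-running validation/creation work happens without any lock held.
        let derived = "derived layout".to_string();

        {
            let mut guard = layouts.entries.write();
            guard.push(derived);
        }

        // Re-acquire only a read lock when the result is needed.
        layouts.entries.read().len()
    }

    fn main() {
        let layouts = Registry { entries: RwLock::new(Vec::new()) };
        assert_eq!(create_pipeline(&layouts, true), 2);
    }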
+ .clone() + }; // Multiview is only supported if the feature is enabled if desc.multiview.is_some() { @@ -2919,7 +2903,7 @@ impl Device { } let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout); let pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.borrow_option(), diff --git a/wgpu-core/src/global.rs b/wgpu-core/src/global.rs index 3991e4f6f8..2244a089cd 100644 --- a/wgpu-core/src/global.rs +++ b/wgpu-core/src/global.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use crate::{ hal_api::HalApi, @@ -6,13 +6,13 @@ use crate::{ id::SurfaceId, identity::GlobalIdentityHandlerFactory, instance::{Instance, Surface}, - registry::Registry, - storage::{Element, StorageReport}, + registry::{Registry, RegistryReport}, + storage::Element, }; #[derive(Debug, PartialEq, Eq)] pub struct GlobalReport { - pub surfaces: StorageReport, + pub surfaces: RegistryReport, #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] pub vulkan: Option, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] @@ -25,10 +25,40 @@ pub struct GlobalReport { pub gl: Option, } +impl GlobalReport { + pub fn surfaces(&self) -> &RegistryReport { + &self.surfaces + } + pub fn hub_report(&self) -> &HubReport { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + if self.vulkan.is_some() { + return self.vulkan.as_ref().unwrap(); + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + if self.metal.is_some() { + return self.metal.as_ref().unwrap(); + } + #[cfg(all(feature = "dx12", windows))] + if self.dx12.is_some() { + return self.dx12.as_ref().unwrap(); + } + #[cfg(all(feature = "dx11", windows))] + if self.dx11.is_some() { + return self.dx11.as_ref().unwrap(); + } + #[cfg(feature = "gles")] + if self.gl.is_some() { + return self.gl.as_ref().unwrap(); + } + unreachable!(); + } +} + pub struct Global { pub instance: Instance, - pub surfaces: Registry, - pub(crate) hubs: Hubs, + pub surfaces: Registry, + pub(crate) hubs: Hubs, + _phantom: PhantomData, } impl Global { @@ -38,6 +68,7 @@ impl Global { instance: Instance::new(name, instance_desc), surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } @@ -54,6 +85,7 @@ impl Global { instance: A::create_instance_from_hal(name, hal_instance), surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } @@ -73,6 +105,7 @@ impl Global { instance, surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } diff --git a/wgpu-core/src/hal_api.rs b/wgpu-core/src/hal_api.rs index df0acdac75..870557b442 100644 --- a/wgpu-core/src/hal_api.rs +++ b/wgpu-core/src/hal_api.rs @@ -11,7 +11,7 @@ pub trait HalApi: hal::Api + 'static { const VARIANT: Backend; fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; - fn hub(global: &Global) -> &Hub; + fn hub(global: &Global) -> &Hub; fn get_surface(surface: &Surface) -> Option<&HalSurface>; } @@ -23,7 +23,7 @@ impl HalApi for hal::api::Empty { fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> { unimplemented!("called empty api") } - fn hub(_: &Global) -> &Hub { + fn hub(_: &Global) -> &Hub { unimplemented!("called empty api") } fn get_surface(_: &Surface) -> Option<&HalSurface> { @@ -44,7 
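Global now carries its G: GlobalIdentityHandlerFactory parameter only at the type level, which is why a `_phantom: PhantomData` field is added in every constructor above. A tiny illustration of why PhantomData is needed for an otherwise-unused type parameter; the factory trait below is a made-up stand-in:

    use std::marker::PhantomData;

    trait IdFactory {
        fn make_id() -> u64;
    }

    struct SequentialFactory;
    impl IdFactory for SequentialFactory {
        fn make_id() -> u64 { 1 }
    }

    // Without the PhantomData field, `F` would be an unused type parameter
    // and the struct definition would not compile.
    struct Global<F: IdFactory> {
        name: String,
        _phantom: PhantomData<F>,
    }

    impl<F: IdFactory> Global<F> {
        fn new(name: &str) -> Self {
            Self { name: name.to_string(), _phantom: PhantomData }
        }
        fn next_id(&self) -> u64 {
            F::make_id()
        }
    }

    fn main() {
        let global: Global<SequentialFactory> = Global::new("player");
        assert_eq!(global.next_id(), 1);
        assert_eq!(global.name, "player");
    }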
+44,7 @@ impl HalApi for hal::api::Vulkan { fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { instance.vulkan.as_ref() } - fn hub(global: &Global) -> &Hub { + fn hub(global: &Global) -> &Hub { &global.hubs.vulkan } fn get_surface(surface: &Surface) -> Option<&HalSurface> { @@ -65,7 +65,7 @@ impl HalApi for hal::api::Metal { fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { instance.metal.as_ref() } - fn hub(global: &Global) -> &Hub { + fn hub(global: &Global) -> &Hub { &global.hubs.metal } fn get_surface(surface: &Surface) -> Option<&HalSurface> { @@ -86,7 +86,7 @@ impl HalApi for hal::api::Dx12 { fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { instance.dx12.as_ref() } - fn hub(global: &Global) -> &Hub { + fn hub(global: &Global) -> &Hub { &global.hubs.dx12 } fn get_surface(surface: &Surface) -> Option<&HalSurface> { @@ -107,7 +107,7 @@ impl HalApi for hal::api::Dx11 { fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { instance.dx11.as_ref() } - fn hub(global: &Global) -> &Hub { + fn hub(global: &Global) -> &Hub { &global.hubs.dx11 } fn get_surface(surface: &Surface) -> Option<&HalSurface> { @@ -129,7 +129,7 @@ impl HalApi for hal::api::Gles { fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> { instance.gl.as_ref() } - fn hub(global: &Global) -> &Hub { + fn hub(global: &Global) -> &Hub { &global.hubs.gl } fn get_surface(surface: &Surface) -> Option<&HalSurface> { diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 568c395ffe..62cdfc2f0e 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -158,31 +158,31 @@ use crate::{ identity::GlobalIdentityHandlerFactory, instance::{Adapter, HalSurface, Surface}, pipeline::{ComputePipeline, RenderPipeline, ShaderModule}, - registry::Registry, + registry::{Registry, RegistryReport}, resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, - storage::{Element, Storage, StorageReport}, + storage::{Element, Storage}, }; use std::fmt::Debug; #[derive(Debug, PartialEq, Eq)] pub struct HubReport { - pub adapters: StorageReport, - pub devices: StorageReport, - pub queues: StorageReport, - pub pipeline_layouts: StorageReport, - pub shader_modules: StorageReport, - pub bind_group_layouts: StorageReport, - pub bind_groups: StorageReport, - pub command_buffers: StorageReport, - pub render_bundles: StorageReport, - pub render_pipelines: StorageReport, - pub compute_pipelines: StorageReport, - pub query_sets: StorageReport, - pub buffers: StorageReport, - pub textures: StorageReport, - pub texture_views: StorageReport, - pub samplers: StorageReport, + pub adapters: RegistryReport, + pub devices: RegistryReport, + pub queues: RegistryReport, + pub pipeline_layouts: RegistryReport, + pub shader_modules: RegistryReport, + pub bind_group_layouts: RegistryReport, + pub bind_groups: RegistryReport, + pub command_buffers: RegistryReport, + pub render_bundles: RegistryReport, + pub render_pipelines: RegistryReport, + pub compute_pipelines: RegistryReport, + pub query_sets: RegistryReport, + pub buffers: RegistryReport, + pub textures: RegistryReport, + pub texture_views: RegistryReport, + pub samplers: RegistryReport, } impl HubReport { @@ -216,28 +216,28 @@ impl HubReport { /// /// /// [`A::hub(global)`]: HalApi::hub -pub struct Hub { - pub adapters: Registry, F>, - pub devices: Registry, F>, - pub queues: Registry, F>, - pub pipeline_layouts: Registry, F>, - pub shader_modules: Registry, F>, - pub bind_group_layouts: Registry, F>, - pub 
bind_groups: Registry, F>, - pub command_buffers: Registry, F>, - pub render_bundles: Registry, F>, - pub render_pipelines: Registry, F>, - pub compute_pipelines: Registry, F>, - pub query_sets: Registry, F>, - pub buffers: Registry, F>, - pub staging_buffers: Registry, F>, - pub textures: Registry, F>, - pub texture_views: Registry, F>, - pub samplers: Registry, F>, +pub struct Hub { + pub adapters: Registry>, + pub devices: Registry>, + pub queues: Registry>, + pub pipeline_layouts: Registry>, + pub shader_modules: Registry>, + pub bind_group_layouts: Registry>, + pub bind_groups: Registry>, + pub command_buffers: Registry>, + pub render_bundles: Registry>, + pub render_pipelines: Registry>, + pub compute_pipelines: Registry>, + pub query_sets: Registry>, + pub buffers: Registry>, + pub staging_buffers: Registry>, + pub textures: Registry>, + pub texture_views: Registry>, + pub samplers: Registry>, } -impl Hub { - fn new(factory: &F) -> Self { +impl Hub { + fn new(factory: &F) -> Self { Self { adapters: Registry::new(A::VARIANT, factory), devices: Registry::new(A::VARIANT, factory), @@ -341,17 +341,17 @@ impl Hub { } } -pub struct Hubs { +pub struct Hubs { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub(crate) vulkan: Hub, + pub(crate) vulkan: Hub, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub(crate) metal: Hub, + pub(crate) metal: Hub, #[cfg(all(feature = "dx12", windows))] - pub(crate) dx12: Hub, + pub(crate) dx12: Hub, #[cfg(all(feature = "dx11", windows))] - pub(crate) dx11: Hub, + pub(crate) dx11: Hub, #[cfg(feature = "gles")] - pub(crate) gl: Hub, + pub(crate) gl: Hub, #[cfg(all( not(all(feature = "vulkan", not(target_arch = "wasm32"))), not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))), @@ -359,11 +359,11 @@ pub struct Hubs { not(all(feature = "dx11", windows)), not(feature = "gles"), ))] - pub(crate) empty: Hub, + pub(crate) empty: Hub, } -impl Hubs { - pub(crate) fn new(factory: &F) -> Self { +impl Hubs { + pub(crate) fn new(factory: &F) -> Self { Self { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: Hub::new(factory), diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 5d33309096..116fb00fa2 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -1,5 +1,10 @@ use crate::{Epoch, Index}; -use std::{cmp::Ordering, fmt, marker::PhantomData}; +use std::{ + any::Any, + cmp::Ordering, + fmt::{self, Debug}, + marker::PhantomData, +}; use wgt::Backend; #[cfg(feature = "id32")] @@ -66,7 +71,7 @@ type Dummy = hal::api::Empty; all(feature = "serde", not(feature = "replay")), derive(serde::Deserialize) )] -pub struct Id(NonZeroId, PhantomData); +pub struct Id(NonZeroId, PhantomData); // This type represents Id in a more readable (and editable) way. #[allow(dead_code)] @@ -77,7 +82,7 @@ enum SerialId { Id(Index, Epoch, Backend), } #[cfg(feature = "trace")] -impl From> for SerialId { +impl From> for SerialId { fn from(id: Id) -> Self { let (index, epoch, backend) = id.unzip(); Self::Id(index, epoch, backend) @@ -131,7 +136,7 @@ impl Clone for Id { } } -impl fmt::Debug for Id { +impl Debug for Id { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { self.unzip().fmt(formatter) } @@ -168,14 +173,14 @@ impl Ord for Id { /// Most `wgpu-core` clients should not use this trait. Unusual clients that /// need to construct `Id` values directly, or access their components, like the /// WGPU recording player, may use this trait to do so. 
-pub trait TypedId: Copy + std::fmt::Debug { +pub trait TypedId: Copy + Debug + Any { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self; fn unzip(self) -> (Index, Epoch, Backend); fn into_raw(self) -> NonZeroId; } #[allow(trivial_numeric_casts)] -impl TypedId for Id { +impl TypedId for Id { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self { assert_eq!(0, epoch >> EPOCH_BITS); assert_eq!(0, (index as IdType) >> INDEX_BITS); diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index fe10bedb0e..54facc7410 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -2,7 +2,7 @@ use parking_lot::Mutex; use wgt::Backend; use crate::{id, Epoch, Index}; -use std::fmt::Debug; +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; /// A simple structure to allocate [`Id`] identifiers. /// @@ -32,7 +32,7 @@ use std::fmt::Debug; /// [`alloc`]: IdentityManager::alloc /// [`free`]: IdentityManager::free #[derive(Debug, Default)] -pub struct IdentityManager { +pub(super) struct IdentityValues { /// Available index values. If empty, then `epochs.len()` is the next index /// to allocate. free: Vec, @@ -45,14 +45,17 @@ pub struct IdentityManager { /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its /// next `Id`. epochs: Vec, + + count: usize, } -impl IdentityManager { +impl IdentityValues { /// Allocate a fresh, never-before-seen id with the given `backend`. /// /// The backend is incorporated into the id, so that ids allocated with /// different `backend` values are always distinct. pub fn alloc(&mut self, backend: Backend) -> I { + self.count += 1; match self.free.pop() { Some(index) => I::zip(index, self.epochs[index as usize], backend), None => { @@ -65,7 +68,7 @@ impl IdentityManager { } /// Free `id`. It will never be returned from `alloc` again. - pub fn free(&mut self, id: I) { + pub fn release(&mut self, id: I) { let (index, epoch, _backend) = id.unzip(); let pe = &mut self.epochs[index as usize]; assert_eq!(*pe, epoch); @@ -74,56 +77,51 @@ impl IdentityManager { if epoch < id::EPOCH_MASK { *pe = epoch + 1; self.free.push(index); + self.count -= 1; } } -} - -/// A type that can build true ids from proto-ids, and free true ids. -/// -/// For some implementations, the true id is based on the proto-id. -/// The caller is responsible for providing well-allocated proto-ids. -/// -/// For other implementations, the proto-id carries no information -/// (it's `()`, say), and this `IdentityHandler` type takes care of -/// allocating a fresh true id. -/// -/// See the module-level documentation for details. -pub trait IdentityHandler: Debug { - /// The type of proto-id consumed by this filter, to produce a true id. - type Input: Clone + Debug; - /// Given a proto-id value `id`, return a true id for `backend`. - fn process(&self, id: Self::Input, backend: Backend) -> I; + pub fn count(&self) -> usize { + self.count + } +} - /// Free the true id `id`. 
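IdentityValues recycles indices through a free list and bumps a per-index epoch on release, so an id minted before a slot was reused can never alias the new resource in that slot. A condensed sketch of the allocate/release scheme, using plain (index, epoch) tuples and ignoring the backend bits and EPOCH_MASK handling of the real Id type:

    #[derive(Default)]
    struct IdentityValues {
        free: Vec<u32>,
        epochs: Vec<u32>,
        count: usize,
    }

    impl IdentityValues {
        fn alloc(&mut self) -> (u32, u32) {
            self.count += 1;
            match self.free.pop() {
                // Reuse a released index together with its bumped epoch.
                Some(index) => (index, self.epochs[index as usize]),
                None => {
                    let index = self.epochs.len() as u32;
                    self.epochs.push(1);
                    (index, 1)
                }
            }
        }

        fn release(&mut self, (index, epoch): (u32, u32)) {
            assert_eq!(self.epochs[index as usize], epoch);
            // Bump the epoch so any id still holding the old one is detectably stale.
            self.epochs[index as usize] = epoch + 1;
            self.free.push(index);
            self.count -= 1;
        }
    }

    fn main() {
        let mut values = IdentityValues::default();
        let id1 = values.alloc();
        values.release(id1);
        let id2 = values.alloc();
        assert_eq!(values.count, 1);
        assert_eq!(id1.0, id2.0); // the index is recycled...
        assert_ne!(id1.1, id2.1); // ...under a fresh epoch, so the old id is stale
    }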
- fn free(&self, id: I); +#[derive(Debug)] +pub struct IdentityManager { + pub(super) values: Mutex, + _phantom: PhantomData, } -impl IdentityHandler for Mutex { - type Input = (); - fn process(&self, _id: Self::Input, backend: Backend) -> I { - self.lock().alloc(backend) +impl IdentityManager { + pub fn process(&self, backend: Backend) -> I { + self.values.lock().alloc(backend) + } + pub fn free(&self, id: I) { + self.values.lock().release(id) } - fn free(&self, id: I) { - self.lock().free(id) +} + +impl IdentityManager { + pub fn new() -> Self { + Self { + values: Mutex::new(IdentityValues::default()), + _phantom: PhantomData, + } } } /// A type that can produce [`IdentityHandler`] filters for ids of type `I`. /// /// See the module-level documentation for details. -pub trait IdentityHandlerFactory { - /// The type of filter this factory constructs. - /// - /// "Filter" and "handler" seem to both mean the same thing here: - /// something that can produce true ids from proto-ids. - type Filter: IdentityHandler; - +pub trait IdentityHandlerFactory { + type Input: Copy; /// Create an [`IdentityHandler`] implementation that can /// transform proto-ids into ids of type `I`. /// /// [`IdentityHandler`]: IdentityHandler - fn spawn(&self) -> Self::Filter; + fn spawn(&self) -> Option>>; + + fn input_to_id(id_in: Self::Input) -> I; } /// A global identity handler factory based on [`IdentityManager`]. @@ -134,10 +132,15 @@ pub trait IdentityHandlerFactory { #[derive(Debug)] pub struct IdentityManagerFactory; -impl IdentityHandlerFactory for IdentityManagerFactory { - type Filter = Mutex; - fn spawn(&self) -> Self::Filter { - Mutex::new(IdentityManager::default()) +impl IdentityHandlerFactory for IdentityManagerFactory { + type Input = (); + + fn spawn(&self) -> Option>> { + Some(Arc::new(IdentityManager::new())) + } + + fn input_to_id(_id_in: Self::Input) -> I { + unreachable!("It should not be called") } } @@ -162,27 +165,22 @@ pub trait GlobalIdentityHandlerFactory: + IdentityHandlerFactory + IdentityHandlerFactory { - fn ids_are_generated_in_wgpu() -> bool; } -impl GlobalIdentityHandlerFactory for IdentityManagerFactory { - fn ids_are_generated_in_wgpu() -> bool { - true - } -} +impl GlobalIdentityHandlerFactory for IdentityManagerFactory {} -pub type Input = <>::Filter as IdentityHandler>::Input; +pub type Input = >::Input; #[test] fn test_epoch_end_of_life() { use id::TypedId as _; - let mut man = IdentityManager::default(); - man.epochs.push(id::EPOCH_MASK); - man.free.push(0); - let id1 = man.alloc::(Backend::Empty); + let man = IdentityManager::::new(); + man.values.lock().epochs.push(id::EPOCH_MASK); + man.values.lock().free.push(0); + let id1 = man.values.lock().alloc::(Backend::Empty); assert_eq!(id1.unzip().0, 0); - man.free(id1); - let id2 = man.alloc::(Backend::Empty); + man.values.lock().release(id1); + let id2 = man.values.lock().alloc::(Backend::Empty); // confirm that the index 0 is no longer re-used assert_eq!(id2.unzip().0, 1); } diff --git a/wgpu-core/src/init_tracker/buffer.rs b/wgpu-core/src/init_tracker/buffer.rs index ea9b9f6a8d..2c0fa8d372 100644 --- a/wgpu-core/src/init_tracker/buffer.rs +++ b/wgpu-core/src/init_tracker/buffer.rs @@ -1,10 +1,10 @@ use super::{InitTracker, MemoryInitKind}; -use crate::id::BufferId; -use std::ops::Range; +use crate::{hal_api::HalApi, resource::Buffer}; +use std::{ops::Range, sync::Arc}; #[derive(Debug, Clone)] -pub(crate) struct BufferInitTrackerAction { - pub id: BufferId, +pub(crate) struct BufferInitTrackerAction { + pub buffer: Arc>, 
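IdentityManager is now just IdentityValues behind a parking_lot::Mutex, so registries can allocate and free ids through a shared &self, and an Arc of the manager can be cloned into each resource's info so the id is released when the resource drops. A stripped-down sketch of that interior-mutability wrapper; the Values type here is a trivial stand-in for IdentityValues:

    use std::sync::Arc;
    use parking_lot::Mutex;

    #[derive(Default)]
    struct Values {
        next: u32,
        live: usize,
    }

    #[derive(Default)]
    struct IdentityManager {
        values: Mutex<Values>,
    }

    impl IdentityManager {
        // &self is enough: the Mutex provides the interior mutability.
        fn process(&self) -> u32 {
            let mut v = self.values.lock();
            v.live += 1;
            let id = v.next;
            v.next += 1;
            id
        }
        fn free(&self, _id: u32) {
            self.values.lock().live -= 1;
        }
    }

    fn main() {
        let manager = Arc::new(IdentityManager::default());
        let id = manager.process();
        // An Arc clone of `manager` could live inside each resource's info and
        // call `free` from that resource's Drop impl.
        manager.free(id);
        assert_eq!(manager.values.lock().live, 0);
    }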
pub range: Range, pub kind: MemoryInitKind, } @@ -14,22 +14,26 @@ pub(crate) type BufferInitTracker = InitTracker; impl BufferInitTracker { /// Checks if an action has/requires any effect on the initialization status /// and shrinks its range if possible. - pub(crate) fn check_action( + pub(crate) fn check_action( &self, - action: &BufferInitTrackerAction, - ) -> Option { - self.create_action(action.id, action.range.clone(), action.kind) + action: &BufferInitTrackerAction, + ) -> Option> { + self.create_action(&action.buffer, action.range.clone(), action.kind) } /// Creates an action if it would have any effect on the initialization /// status and shrinks the range if possible. - pub(crate) fn create_action( + pub(crate) fn create_action( &self, - id: BufferId, + buffer: &Arc>, query_range: Range, kind: MemoryInitKind, - ) -> Option { + ) -> Option> { self.check(query_range) - .map(|range| BufferInitTrackerAction { id, range, kind }) + .map(|range| BufferInitTrackerAction { + buffer: buffer.clone(), + range, + kind, + }) } } diff --git a/wgpu-core/src/init_tracker/texture.rs b/wgpu-core/src/init_tracker/texture.rs index 17368e1014..a859b5f784 100644 --- a/wgpu-core/src/init_tracker/texture.rs +++ b/wgpu-core/src/init_tracker/texture.rs @@ -1,7 +1,7 @@ use super::{InitTracker, MemoryInitKind}; -use crate::{id::TextureId, track::TextureSelector}; +use crate::{hal_api::HalApi, resource::Texture, track::TextureSelector}; use arrayvec::ArrayVec; -use std::ops::Range; +use std::{ops::Range, sync::Arc}; #[derive(Debug, Clone)] pub(crate) struct TextureInitRange { @@ -35,8 +35,8 @@ impl From for TextureInitRange { } #[derive(Debug, Clone)] -pub(crate) struct TextureInitTrackerAction { - pub(crate) id: TextureId, +pub(crate) struct TextureInitTrackerAction { + pub(crate) texture: Arc>, pub(crate) range: TextureInitRange, pub(crate) kind: MemoryInitKind, } @@ -57,10 +57,10 @@ impl TextureInitTracker { } } - pub(crate) fn check_action( + pub(crate) fn check_action( &self, - action: &TextureInitTrackerAction, - ) -> Option { + action: &TextureInitTrackerAction, + ) -> Option> { let mut mip_range_start = std::usize::MAX; let mut mip_range_end = std::usize::MIN; let mut layer_range_start = std::u32::MAX; @@ -85,7 +85,7 @@ impl TextureInitTracker { if mip_range_start < mip_range_end && layer_range_start < layer_range_end { Some(TextureInitTrackerAction { - id: action.id, + texture: action.texture.clone(), range: TextureInitRange { mip_range: mip_range_start as u32..mip_range_end as u32, layer_range: layer_range_start..layer_range_end, diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 91c85727bc..18fa39ba2f 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -428,10 +428,10 @@ pub enum AdapterInputs<'a, I> { Mask(Backends, fn(Backend) -> I), } -impl AdapterInputs<'_, I> { +impl AdapterInputs<'_, I> { fn find(&self, b: Backend) -> Option { match *self { - Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).cloned(), + Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).copied(), Self::Mask(bits, ref fun) => { if bits.contains(b.into()) { Some(fun(b)) @@ -530,7 +530,7 @@ impl Global { raw: hal_surface.unwrap(), }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); id } @@ -565,7 +565,7 @@ impl Global { }, }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); id } @@ -601,7 +601,7 @@ impl 
Global { }, }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); Ok(id) } @@ -637,7 +637,7 @@ impl Global { }, }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); Ok(id) } @@ -668,7 +668,7 @@ impl Global { }, }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); id } @@ -701,7 +701,7 @@ impl Global { }, }; - let (id, _) = self.surfaces.prepare(id_in).assign(surface); + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); id } @@ -765,7 +765,7 @@ impl Global { for raw in hal_adapters { let adapter = Adapter::new(raw); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); - let (id, _) = hub.adapters.prepare(id_backend.clone()).assign(adapter); + let (id, _) = hub.adapters.prepare::(id_backend).assign(adapter); list.push(id); } } @@ -815,7 +815,7 @@ impl Global { log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); let (id, _) = HalApi::hub(self) .adapters - .prepare(new_id.unwrap()) + .prepare::(new_id.unwrap()) .assign(adapter); Some(id) } @@ -829,7 +829,7 @@ impl Global { ) -> Result { profiling::scope!("Instance::pick_adapter"); - fn gather( + fn gather( _: A, instance: Option<&A::Instance>, inputs: &AdapterInputs, @@ -1007,7 +1007,7 @@ impl Global { ) -> AdapterId { profiling::scope!("Instance::create_adapter_from_hal"); - let fid = A::hub(self).adapters.prepare(input); + let fid = A::hub(self).adapters.prepare::(input); let (id, _adapter): (crate::id::Id>, Arc>) = match A::VARIANT { @@ -1128,8 +1128,8 @@ impl Global { profiling::scope!("Adapter::request_device"); let hub = A::hub(self); - let device_fid = hub.devices.prepare(device_id_in); - let queue_fid = hub.queues.prepare(queue_id_in); + let device_fid = hub.devices.prepare::(device_id_in); + let queue_fid = hub.queues.prepare::(queue_id_in); let error = loop { let adapter = match hub.adapters.get(adapter_id) { @@ -1175,8 +1175,8 @@ impl Global { profiling::scope!("Global::create_device_from_hal"); let hub = A::hub(self); - let devices_fid = hub.devices.prepare(device_id_in); - let queues_fid = hub.queues.prepare(queue_id_in); + let devices_fid = hub.devices.prepare::(device_id_in); + let queues_fid = hub.queues.prepare::(queue_id_in); let error = loop { let adapter = match hub.adapters.get(adapter_id) { diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index b768797a37..4ba25db38f 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -117,7 +117,7 @@ impl Global { let hub = A::hub(self); - let fid = hub.textures.prepare(texture_id_in); + let fid = hub.textures.prepare::(texture_id_in); let surface = self .surfaces diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 627ef0def9..770738977c 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -5,11 +5,27 @@ use wgt::Backend; use crate::{ id, - identity::{IdentityHandler, IdentityHandlerFactory}, + identity::{IdentityHandlerFactory, IdentityManager}, resource::Resource, - storage::{InvalidId, Storage, StorageReport}, + storage::{Element, InvalidId, Storage}, }; +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] +pub struct RegistryReport { + pub num_allocated: usize, + pub num_kept_from_user: usize, + pub num_released_from_user: usize, + pub num_error: usize, + pub element_size: usize, +} + +impl RegistryReport { + pub fn is_empty(&self) -> bool { + 
self.num_allocated + self.num_kept_from_user + self.num_released_from_user + self.num_error + == 0 + } +} + /// Registry is the primary holder of each resource type /// Every resource is now arcanized so the last arc released /// will in the end free the memory and release the inner raw resource @@ -22,14 +38,14 @@ use crate::{ /// any other dependent resource /// #[derive(Debug)] -pub struct Registry, F: IdentityHandlerFactory> { - identity: F::Filter, +pub struct Registry> { + identity: Option>>, storage: RwLock>, backend: Backend, } -impl, F: IdentityHandlerFactory> Registry { - pub(crate) fn new(backend: Backend, factory: &F) -> Self { +impl> Registry { + pub(crate) fn new>(backend: Backend, factory: &F) -> Self { Self { identity: factory.spawn(), storage: RwLock::new(Storage::new()), @@ -37,7 +53,7 @@ impl, F: IdentityHandlerFactory> Registry Self { + pub(crate) fn without_backend>(factory: &F) -> Self { Self::new(Backend::Empty, factory) } } @@ -45,6 +61,7 @@ impl, F: IdentityHandlerFactory> Registry> { id: I, + identity: Option>>, data: &'a RwLock>, } @@ -59,27 +76,41 @@ impl> FutureId<'_, I, T> { } pub fn assign(self, mut value: T) -> (I, Arc) { - value.as_info_mut().set_id(self.id); + value.as_info_mut().set_id(self.id, &self.identity); self.data.write().insert(self.id, Arc::new(value)); (self.id, self.data.read().get(self.id).unwrap().clone()) } + pub fn assign_existing(self, value: &Arc) -> I { + #[cfg(debug_assertions)] + debug_assert!(!self.data.read().contains(self.id)); + self.data.write().insert(self.id, value.clone()); + self.id + } + pub fn assign_error(self, label: &str) -> I { self.data.write().insert_error(self.id, label); self.id } } -impl, F: IdentityHandlerFactory> Registry { - pub(crate) fn prepare( - &self, - id_in: >::Input, - ) -> FutureId { +impl> Registry { + pub(crate) fn prepare(&self, id_in: F::Input) -> FutureId + where + F: IdentityHandlerFactory, + { FutureId { - id: self.identity.process(id_in, self.backend), + id: match self.identity.as_ref() { + Some(identity) => identity.process(self.backend), + _ => F::input_to_id(id_in), + }, + identity: self.identity.clone(), data: &self.storage, } } + pub(crate) fn contains(&self, id: I) -> bool { + self.read().contains(id) + } pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { self.read().try_get(id).map(|o| o.cloned()) } @@ -93,16 +124,15 @@ impl, F: IdentityHandlerFactory> Regist self.storage.write() } pub fn unregister_locked(&self, id: I, storage: &mut Storage) -> Option> { - let value = storage.remove(id); - //Note: careful about the order here! - self.identity.free(id); - //Returning None is legal if it's an error ID - value + storage.remove(id) + } + pub fn force_replace(&self, id: I, mut value: T) { + let mut storage = self.storage.write(); + value.as_info_mut().set_id(id, &self.identity); + storage.force_replace(id, value) } pub(crate) fn unregister(&self, id: I) -> Option> { let value = self.storage.write().remove(id); - //Note: careful about the order here! 
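Resource registration now follows a two-step prepare/assign flow: prepare reserves an id (or derives one from user input) as a FutureId, and assign stores the finished value and returns (id, Arc<T>), as in the `surfaces.prepare::<G>(id_in).assign(surface)` calls above. A hedged, toy-sized sketch of that flow; the real Registry's generics, error ids, and identity handling are omitted:

    use std::collections::HashMap;
    use std::sync::Arc;
    use parking_lot::RwLock;

    struct Registry {
        next_id: RwLock<u32>,
        storage: RwLock<HashMap<u32, Arc<String>>>,
    }

    struct FutureId<'a> {
        id: u32,
        storage: &'a RwLock<HashMap<u32, Arc<String>>>,
    }

    impl<'a> FutureId<'a> {
        // On success, store the finished resource under the reserved id.
        fn assign(self, value: String) -> (u32, Arc<String>) {
            let arc = Arc::new(value);
            self.storage.write().insert(self.id, Arc::clone(&arc));
            (self.id, arc)
        }
    }

    impl Registry {
        fn prepare(&self) -> FutureId<'_> {
            let mut next = self.next_id.write();
            let id = *next;
            *next += 1;
            FutureId { id, storage: &self.storage }
        }
    }

    fn main() {
        let registry = Registry {
            next_id: RwLock::new(0),
            storage: RwLock::new(HashMap::new()),
        };
        let fid = registry.prepare();
        // ...creation work may fail before assignment; on success:
        let (id, surface) = fid.assign("surface".to_string());
        assert_eq!(id, 0);
        assert!(Arc::ptr_eq(registry.storage.read().get(&id).unwrap(), &surface));
    }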
- self.identity.free(id); //Returning None is legal if it's an error ID value } @@ -128,7 +158,22 @@ impl, F: IdentityHandlerFactory> Regist } } - pub(crate) fn generate_report(&self) -> StorageReport { - self.storage.read().generate_report() + pub(crate) fn generate_report(&self) -> RegistryReport { + let storage = self.storage.read(); + let mut report = RegistryReport { + element_size: std::mem::size_of::(), + ..Default::default() + }; + if let Some(identity) = self.identity.as_ref() { + report.num_allocated = identity.values.lock().count(); + } + for element in storage.map.iter() { + match *element { + Element::Occupied(..) => report.num_kept_from_user += 1, + Element::Vacant => report.num_released_from_user += 1, + Element::Error(..) => report.num_error += 1, + } + } + report } } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 4632473092..7b582e5cf7 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -1,18 +1,25 @@ +#[cfg(feature = "trace")] +use crate::device::trace; use crate::{ - device::{Device, DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, + device::{ + queue, BufferMapPendingClosure, Device, DeviceError, HostMap, MissingDownlevelFlags, + MissingFeatures, + }, global::Global, hal_api::HalApi, id::{ AdapterId, BufferId, DeviceId, QuerySetId, SamplerId, StagingBufferId, SurfaceId, TextureId, TextureViewId, TypedId, }, - identity::GlobalIdentityHandlerFactory, + identity::{GlobalIdentityHandlerFactory, IdentityManager}, init_tracker::{BufferInitTracker, TextureInitTracker}, + resource, track::TextureSelector, validation::MissingBufferUsageError, Label, SubmissionIndex, }; +use hal::CommandEncoder; use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; use thiserror::Error; @@ -20,6 +27,7 @@ use thiserror::Error; use std::{ borrow::Borrow, fmt::Debug, + iter, mem, ops::Range, ptr::NonNull, sync::{ @@ -50,6 +58,7 @@ use std::{ #[derive(Debug)] pub struct ResourceInfo { id: Option, + identity: Option>>, /// The index of the last queue submission in which the resource /// was used. 
/// @@ -64,11 +73,22 @@ pub struct ResourceInfo { pub(crate) label: String, } +impl Drop for ResourceInfo { + fn drop(&mut self) { + if let Some(identity) = self.identity.as_ref() { + let id = self.id.as_ref().unwrap(); + identity.free(*id); + log::info!("Freeing {:?}", self.label()); + } + } +} + impl ResourceInfo { #[allow(unused_variables)] pub(crate) fn new(label: &str) -> Self { Self { id: None, + identity: None, submission_index: AtomicUsize::new(0), #[cfg(debug_assertions)] label: label.to_string(), @@ -95,8 +115,9 @@ impl ResourceInfo { self.id.unwrap() } - pub(crate) fn set_id(&mut self, id: Id) { + pub(crate) fn set_id(&mut self, id: Id, identity: &Option>>) { self.id = Some(id); + self.identity = identity.clone(); } /// Record that this resource will be used by the queue submission with the @@ -419,6 +440,157 @@ impl Buffer { pub(crate) fn raw(&self) -> &A::Buffer { self.raw.as_ref().unwrap() } + + pub(crate) fn buffer_unmap_inner( + self: &Arc, + ) -> Result, BufferAccessError> { + use hal::Device; + + let device = &self.device; + let buffer_id = self.info.id(); + log::debug!("Buffer {:?} map state -> Idle", buffer_id); + match mem::replace(&mut *self.map_state.lock(), resource::BufferMapState::Idle) { + resource::BufferMapState::Init { + ptr, + stage_buffer, + needs_flush, + } => { + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), self.size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: 0..self.size, + queued: true, + }); + } + let _ = ptr; + if needs_flush { + unsafe { + device + .raw() + .flush_mapped_ranges(stage_buffer.raw(), iter::once(0..self.size)); + } + } + + let raw_buf = self.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; + + self.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); + let region = wgt::BufferSize::new(self.size).map(|size| hal::BufferCopy { + src_offset: 0, + dst_offset: 0, + size, + }); + let transition_src = hal::BufferBarrier { + buffer: stage_buffer.raw(), + usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, + }; + let transition_dst = hal::BufferBarrier { + buffer: raw_buf, + usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, + }; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); + unsafe { + encoder.transition_buffers( + iter::once(transition_src).chain(iter::once(transition_dst)), + ); + if self.size > 0 { + encoder.copy_buffer_to_buffer( + stage_buffer.raw(), + raw_buf, + region.into_iter(), + ); + } + } + pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer)); + pending_writes.dst_buffers.insert(buffer_id, self.clone()); + } + resource::BufferMapState::Idle => { + return Err(BufferAccessError::NotMapped); + } + resource::BufferMapState::Waiting(pending) => { + return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); + } + resource::BufferMapState::Active { ptr, range, host } => { + if host == HostMap::Write { + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + let size = range.end - range.start; + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: range.clone(), + queued: false, + }); + } + let _ = (ptr, range); + } + unsafe { + device + 
.raw() + .unmap_buffer(self.raw()) + .map_err(DeviceError::from)? + }; + } + } + Ok(None) + } + + pub(crate) fn destroy(self: &Arc) -> Result<(), DestroyError> { + let map_closure; + // Restrict the locks to this scope. + { + let device = &self.device; + let buffer_id = self.info.id(); + + map_closure = match &*self.map_state.lock() { + &BufferMapState::Waiting(..) // To get the proper callback behavior. + | &BufferMapState::Init { .. } + | &BufferMapState::Active { .. } + => { + self.buffer_unmap_inner() + .unwrap_or(None) + } + _ => None, + }; + + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::FreeBuffer(buffer_id)); + } + if self.raw.is_none() { + return Err(resource::DestroyError::AlreadyDestroyed); + } + + let temp = queue::TempResource::Buffer(self.clone()); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_buffers.contains_key(&buffer_id) { + pending_writes.temp_resources.push(temp); + } else { + let last_submit_index = self.info.submission_index(); + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); + } + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((operation, status)) = map_closure { + operation.callback.call(status); + } + + Ok(()) + } } #[derive(Clone, Debug, Error)] @@ -855,9 +1027,6 @@ pub struct TextureView { pub(crate) raw: Option, // if it's a surface texture - it's none pub(crate) parent: Option>>, - // The parent's refcount is held alive, but the parent may still be deleted - // if it's a surface texture. TODO: make this cleaner. - pub(crate) parent_id: TextureId, pub(crate) device: Arc>, //TODO: store device_id for quick access? pub(crate) desc: HalTextureViewDescriptor, diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index dbf85482df..de78905a27 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, mem, ops, sync::Arc}; +use std::{marker::PhantomData, ops, sync::Arc}; use wgt::Backend; @@ -21,20 +21,6 @@ pub(crate) enum Element { Error(Epoch, String), } -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] -pub struct StorageReport { - pub num_occupied: usize, - pub num_vacant: usize, - pub num_error: usize, - pub element_size: usize, -} - -impl StorageReport { - pub fn is_empty(&self) -> bool { - self.num_occupied + self.num_vacant + self.num_error == 0 - } -} - #[derive(Clone, Debug)] pub(crate) struct InvalidId; @@ -83,6 +69,7 @@ where T: Resource, I: id::TypedId, { + #[allow(dead_code)] pub(crate) fn contains(&self, id: I) -> bool { let (index, epoch, _) = id.unzip(); match self.map.get(index as usize) { @@ -117,24 +104,6 @@ where result } - /// Get refcount of an item with specified ID - /// And return true if it's 1 or false otherwise - pub(crate) fn is_unique(&self, id: I) -> Result { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v.is_unique()), epoch), - Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - None => return Err(InvalidId), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{:?}] is no longer alive", - self.kind, id - ); - result - } - /// Get a reference to an item behind a potentially invalid ID. 
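Buffer::destroy above collects the pending map callback while the device and pending-writes locks are held, but only invokes it after the scope (and therefore the locks) has been released, so user code in the callback cannot re-enter locked state. A minimal sketch of that collect-under-lock, call-after-unlock pattern with a made-up callback type:

    use std::sync::Mutex;

    type MapCallback = Box<dyn FnOnce(Result<(), &'static str>)>;

    struct Buffer {
        pending_callback: Mutex<Option<MapCallback>>,
    }

    fn destroy(buffer: &Buffer) {
        let callback;
        // Restrict the lock to this scope.
        {
            let mut guard = buffer.pending_callback.lock().unwrap();
            callback = guard.take();
            // ...schedule the actual resource destruction while still locked...
        }
        // The lock is released here; invoking user code is now safe.
        if let Some(cb) = callback {
            cb(Err("buffer destroyed before map completed"));
        }
    }

    fn main() {
        let callback: MapCallback = Box::new(|status| {
            assert!(status.is_err());
        });
        let buffer = Buffer {
            pending_callback: Mutex::new(Some(callback)),
        };
        destroy(&buffer);
    }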
/// Panics if there is an epoch mismatch, or the entry is empty. pub(crate) fn get(&self, id: I) -> Result<&Arc, InvalidId> { @@ -153,14 +122,6 @@ where result } - pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &Arc { - match self.map[id as usize] { - Element::Occupied(ref v, _) => v, - Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), - Element::Error(_, _) => panic!(""), - } - } - pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { let (index, _, _) = id.unzip(); match self.map.get(index as usize) { @@ -180,22 +141,25 @@ where } pub(crate) fn insert(&mut self, id: I, value: Arc) { + log::info!("User is inserting {}{:?}", T::TYPE, id); let (index, epoch, _backend) = id.unzip(); self.insert_impl(index as usize, Element::Occupied(value, epoch)) } pub(crate) fn insert_error(&mut self, id: I, label: &str) { + log::info!("User is insering as error {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) } - pub(crate) fn force_replace(&mut self, id: I, mut value: T) { + pub(crate) fn force_replace(&mut self, id: I, value: T) { + log::info!("User is replacing {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); - value.as_info_mut().set_id(id); self.map[index as usize] = Element::Occupied(Arc::new(value), epoch); } pub(crate) fn remove(&mut self, id: I) -> Option> { + log::info!("User is removing {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { Element::Occupied(value, storage_epoch) => { @@ -207,21 +171,6 @@ where } } - // Prevents panic on out of range access, allows Vacant elements. - pub(crate) fn _try_remove(&mut self, id: I) -> Option> { - let (index, epoch, _) = id.unzip(); - if index as usize >= self.map.len() { - None - } else if let Element::Occupied(value, storage_epoch) = - std::mem::replace(&mut self.map[index as usize], Element::Vacant) - { - assert_eq!(epoch, storage_epoch); - Some(value) - } else { - None - } - } - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator)> { self.map .iter() @@ -241,19 +190,4 @@ where pub(crate) fn len(&self) -> usize { self.map.len() } - - pub(crate) fn generate_report(&self) -> StorageReport { - let mut report = StorageReport { - element_size: mem::size_of::(), - ..Default::default() - }; - for element in self.map.iter() { - match *element { - Element::Occupied(..) => report.num_occupied += 1, - Element::Vacant => report.num_vacant += 1, - Element::Error(..) => report.num_error += 1, - } - } - report - } } diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 32e5dce28b..973684419b 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -5,20 +5,20 @@ * one subresource, they have no selector. !*/ -use std::{borrow::Cow, marker::PhantomData, sync::Arc, vec::Drain}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc}; use super::PendingTransition; use crate::{ hal_api::HalApi, id::{BufferId, TypedId}, - resource::Buffer, + resource::{Buffer, Resource}, storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, }; -use hal::BufferUses; +use hal::{BufferBarrier, BufferUses}; use wgt::{strict_assert, strict_assert_eq}; impl ResourceUses for BufferUses { @@ -43,7 +43,7 @@ impl ResourceUses for BufferUses { /// Stores all the buffers that a bind group stores. 
#[derive(Debug)] pub(crate) struct BufferBindGroupState { - buffers: Vec<(BufferId, Arc>, BufferUses)>, + buffers: Vec<(Arc>, BufferUses)>, _phantom: PhantomData, } @@ -60,19 +60,22 @@ impl BufferBindGroupState { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. + #[allow(clippy::pattern_type_mismatch)] pub(crate) fn optimize(&mut self) { self.buffers - .sort_unstable_by_key(|&(id, _, _)| id.unzip().0); + .sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0); } /// Returns a list of all buffers tracked. May contain duplicates. + #[allow(clippy::pattern_type_mismatch)] pub fn used_ids(&self) -> impl Iterator + '_ { - self.buffers.iter().map(|&(id, _, _)| id) + self.buffers.iter().map(|(ref b, _)| b.as_info().id()) } /// Returns a list of all buffers tracked. May contain duplicates. + #[allow(clippy::pattern_type_mismatch)] pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.buffers.iter().map(|&(_id, ref buffer, _u)| buffer) + self.buffers.iter().map(|(ref buffer, _u)| buffer) } /// Adds the given resource with the given state. @@ -81,10 +84,10 @@ impl BufferBindGroupState { storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, - ) -> Option<&'a Buffer> { + ) -> Option<&'a Arc>> { let buffer = storage.get(id).ok()?; - self.buffers.push((id, buffer.clone(), state)); + self.buffers.push((buffer.clone(), state)); Some(buffer) } @@ -133,6 +136,20 @@ impl BufferUsageScope { self.metadata.owned_resources() } + pub fn get(&self, id: BufferId) -> Option<&Arc>> { + let index = id.unzip().0 as usize; + if index > self.metadata.size() { + return None; + } + self.tracker_assert_in_bounds(index); + unsafe { + if self.metadata.contains_unchecked(index) { + return Some(self.metadata.get_resource_unchecked(index)); + } + } + None + } + /// Merge the list of buffer states in the given bind group into this usage scope. /// /// If any of the resulting states is invalid, stops the merge and returns a usage @@ -149,8 +166,8 @@ impl BufferUsageScope { &mut self, bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref resource, state) in &bind_group.buffers { - let index = id.unzip().0 as usize; + for &(ref resource, state) in &bind_group.buffers { + let index = resource.as_info().id().unzip().0 as usize; unsafe { insert_or_merge( @@ -219,7 +236,7 @@ impl BufferUsageScope { storage: &'a Storage, BufferId>, id: BufferId, new_state: BufferUses, - ) -> Result<&'a Buffer, UsageConflict> { + ) -> Result<&'a Arc>, UsageConflict> { let buffer = storage .get(id) .map_err(|_| UsageConflict::BufferInvalid { id })?; @@ -302,8 +319,12 @@ impl BufferTracker { } /// Drains all currently pending transitions. - pub fn drain(&mut self) -> Drain<'_, PendingTransition> { - self.temp.drain(..) + pub fn drain_transitions(&mut self) -> impl Iterator> { + let buffer_barriers = self.temp.drain(..).map(|pending| { + let buf = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; + pending.into_hal(buf) + }); + buffer_barriers } /// Inserts a single buffer and its state into the resource tracker. @@ -347,15 +368,8 @@ impl BufferTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
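drain_transitions replaces drain: instead of handing back raw PendingTransitions for the caller to resolve against a storage, the tracker pairs each pending transition with the Arc it already holds in its metadata and yields ready-made barriers. A simplified sketch of that shape with toy transition and barrier types (not the real hal ones), collected into a Vec for brevity:

    use std::ops::Range;
    use std::sync::Arc;

    struct Buffer {
        name: &'static str,
    }

    struct PendingTransition {
        index: usize,
        from: u32,
        to: u32,
    }

    struct Barrier<'a> {
        buffer: &'a Arc<Buffer>,
        usage: Range<u32>,
    }

    struct Tracker {
        resources: Vec<Arc<Buffer>>,
        temp: Vec<PendingTransition>,
    }

    impl Tracker {
        // Each drained transition is resolved against the tracked resources,
        // so callers get ready-to-record barriers rather than bare indices.
        fn drain_transitions(&mut self) -> Vec<Barrier<'_>> {
            let resources = &self.resources;
            self.temp
                .drain(..)
                .map(|pending| Barrier {
                    buffer: &resources[pending.index],
                    usage: pending.from..pending.to,
                })
                .collect()
        }
    }

    fn main() {
        let mut tracker = Tracker {
            resources: vec![Arc::new(Buffer { name: "staging" })],
            temp: vec![PendingTransition { index: 0, from: 1, to: 2 }],
        };
        let barriers = tracker.drain_transitions();
        assert_eq!(barriers[0].buffer.name, "staging");
        assert_eq!(barriers[0].usage, 1..2);
    }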
- pub fn set_single<'a>( - &mut self, - storage: &'a Storage, BufferId>, - id: BufferId, - state: BufferUses, - ) -> SetSingleResult { - let buffer = storage.get(id).ok()?; - - let index = id.unzip().0 as usize; + pub fn set_single(&mut self, buffer: &Arc>, state: BufferUses) -> SetSingleResult { + let index: usize = buffer.as_info().id().unzip().0 as usize; self.allow_index(index); @@ -385,7 +399,7 @@ impl BufferTracker { /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. @@ -423,7 +437,7 @@ impl BufferTracker { /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. @@ -461,7 +475,7 @@ impl BufferTracker { /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// This is a really funky method used by Compute Passes to generate /// barriers after a call to dispatch without needing to iterate @@ -513,6 +527,21 @@ impl BufferTracker { } } + #[allow(dead_code)] + pub fn get(&self, id: BufferId) -> Option<&Arc>> { + let index = id.unzip().0 as usize; + if index > self.metadata.size() { + return None; + } + self.tracker_assert_in_bounds(index); + unsafe { + if self.metadata.contains_unchecked(index) { + return Some(self.metadata.get_resource_unchecked(index)); + } + } + None + } + /// Removes the buffer `id` from this tracker if it is otherwise unused. /// /// A buffer is 'otherwise unused' when the only references to it are: @@ -534,7 +563,7 @@ impl BufferTracker { /// [`Device::trackers`]: crate::device::Device /// [`self.metadata`]: BufferTracker::metadata /// [`Hub::buffers`]: crate::hub::Hub::buffers - pub fn remove_abandoned(&mut self, id: BufferId) -> bool { + pub fn remove_abandoned(&mut self, id: BufferId, is_in_registry: bool) -> bool { let index = id.unzip().0 as usize; if index > self.metadata.size() { @@ -546,7 +575,10 @@ impl BufferTracker { unsafe { if self.metadata.contains_unchecked(index) { let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - if existing_ref_count <= 3 { + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = if is_in_registry { 3 } else { 2 }; + if existing_ref_count <= min_ref_count { self.metadata.remove(index); return true; } else { diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 71936dbb86..f51d736a1f 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -401,14 +401,10 @@ impl RenderBundleScope { /// length of the storage given at the call to `new`. 
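The remove_abandoned methods now take an is_in_registry flag and lower the expected strong-count threshold from three (registry + device tracker + the candidate clone) to two once the user-facing registry no longer holds the resource. A hedged sketch of that check using Arc::strong_count directly, which the real trackers approximate through their metadata ref counts:

    use std::sync::Arc;

    struct Texture;

    // Threshold of "only bookkeeping holders left": registry + device tracker +
    // the clone being inspected, or one fewer once the user has already released
    // the resource and it no longer appears in the registry.
    fn remove_abandoned(candidate: &Arc<Texture>, is_in_registry: bool) -> bool {
        let min_ref_count = if is_in_registry { 3 } else { 2 };
        Arc::strong_count(candidate) <= min_ref_count
    }

    fn main() {
        let registry_handle = Arc::new(Texture);
        let _tracker_handle = Arc::clone(&registry_handle);
        let candidate = Arc::clone(&registry_handle);

        // Only bookkeeping references exist: the texture counts as abandoned.
        assert!(remove_abandoned(&candidate, true));

        // A user-held reference (e.g. from a bind group) keeps it alive.
        let user_handle = Arc::clone(&registry_handle);
        assert!(!remove_abandoned(&candidate, true));
        drop(user_handle);

        // Once the registry entry is gone too, two references suffice.
        drop(registry_handle);
        assert!(remove_abandoned(&candidate, false));
    }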
pub unsafe fn merge_bind_group( &mut self, - textures: &Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; - unsafe { - self.textures - .merge_bind_group(textures, &bind_group.textures)? - }; + unsafe { self.textures.merge_bind_group(&bind_group.textures)? }; Ok(()) } @@ -450,13 +446,11 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + self.textures.merge_bind_group(&bind_group.textures)?; } Ok(()) @@ -473,12 +467,10 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_render_bundle( &mut self, - textures: &Storage, id::TextureId>, render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { self.buffers.merge_usage_scope(&render_bundle.buffers)?; - self.textures - .merge_usage_scope(textures, &render_bundle.textures)?; + self.textures.merge_usage_scope(&render_bundle.textures)?; Ok(()) } @@ -578,7 +570,6 @@ impl Tracker { /// value given to `set_size` pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - textures: &Storage, id::TextureId>, scope: &mut UsageScope, bind_group: &BindGroupStates, ) { @@ -589,11 +580,8 @@ impl Tracker { ) }; unsafe { - self.textures.set_and_remove_from_usage_scope_sparse( - textures, - &mut scope.textures, - &bind_group.textures, - ) + self.textures + .set_and_remove_from_usage_scope_sparse(&mut scope.textures, &bind_group.textures) }; } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 08d05c4703..f565932f8b 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -32,15 +32,8 @@ impl> StatelessBindGroupSate { } /// Returns a list of all resources tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator + '_ { - self.resources.iter().map(|&(id, _)| id) - } - - /// Returns a list of all resources tracked. May contain duplicates. - pub fn used_resources(&self) -> impl Iterator> + '_ { - self.resources - .iter() - .map(|&(_, ref resource)| resource.clone()) + pub fn used_resources(&self) -> impl Iterator> + '_ { + self.resources.iter().map(|&(_, ref resource)| resource) } /// Adds the given resource. @@ -158,6 +151,20 @@ impl> StatelessTracker { } } + pub fn get(&self, id: Id) -> Option<&Arc> { + let index = id.unzip().0 as usize; + if index > self.metadata.size() { + return None; + } + self.tracker_assert_in_bounds(index); + unsafe { + if self.metadata.contains_unchecked(index) { + return Some(self.metadata.get_resource_unchecked(index)); + } + } + None + } + /// Removes the given resource from the tracker iff we have the last reference to the /// resource and the epoch matches. /// @@ -165,7 +172,7 @@ impl> StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. 
- pub fn remove_abandoned(&mut self, id: Id) -> bool { + pub fn remove_abandoned(&mut self, id: Id, is_in_registry: bool) -> bool { let index = id.unzip().0 as usize; if index > self.metadata.size() { @@ -177,8 +184,10 @@ impl> StatelessTracker { unsafe { if self.metadata.contains_unchecked(index) { let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //3 ref count: Registry, Device Tracker and suspected resource itself - if existing_ref_count <= 3 { + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = if is_in_registry { 3 } else { 2 }; + if existing_ref_count <= min_ref_count { self.metadata.remove(index); return true; } else { diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 6666f74872..a90888a86d 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -23,14 +23,13 @@ use super::{range::RangedStates, PendingTransition}; use crate::{ hal_api::HalApi, id::{TextureId, TypedId}, - resource::Texture, - storage::Storage, + resource::{Resource, Texture}, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, }; -use hal::TextureUses; +use hal::{TextureBarrier, TextureUses}; use arrayvec::ArrayVec; use naga::FastHashMap; @@ -150,7 +149,6 @@ impl ComplexTextureState { #[derive(Debug)] struct TextureBindGroupStateData { - id: TextureId, selector: Option, texture: Arc>, usage: TextureUses, @@ -173,7 +171,8 @@ impl TextureBindGroupState { /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. pub(crate) fn optimize(&mut self) { - self.textures.sort_unstable_by_key(|v| v.id.unzip().0); + self.textures + .sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0); } /// Returns a list of all textures tracked. May contain duplicates. @@ -184,21 +183,16 @@ impl TextureBindGroupState { /// Adds the given resource with the given state. pub fn add_single<'a>( &mut self, - storage: &'a Storage, TextureId>, - id: TextureId, + texture: &'a Arc>, selector: Option, state: TextureUses, - ) -> Option<&'a Texture> { - let resource = storage.get(id).ok()?; - + ) -> Option<&'a Arc>> { self.textures.push(TextureBindGroupStateData { - id, selector, - texture: resource.clone(), + texture: texture.clone(), usage: state, }); - - Some(resource) + Some(texture) } } @@ -280,11 +274,7 @@ impl TextureUsageScope { /// /// If the given tracker uses IDs higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn merge_usage_scope( - &mut self, - storage: &Storage, TextureId>, - scope: &Self, - ) -> Result<(), UsageConflict> { + pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> { let incoming_size = scope.set.simple.len(); if incoming_size > self.set.simple.len() { self.set_size(incoming_size); @@ -294,7 +284,8 @@ impl TextureUsageScope { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); - let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; + let texture_selector = + unsafe { &scope.metadata.get_resource_unchecked(index).full_range }; unsafe { insert_or_merge( texture_selector, @@ -326,11 +317,10 @@ impl TextureUsageScope { /// method is called. 
pub unsafe fn merge_bind_group( &mut self, - storage: &Storage, TextureId>, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { for t in &bind_group.textures { - unsafe { self.merge_single(storage, t.id, t.selector.clone(), t.usage)? }; + unsafe { self.merge_single(&t.texture, t.selector.clone(), t.usage)? }; } Ok(()) @@ -351,17 +341,15 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_single( &mut self, - storage: &Storage, TextureId>, - id: TextureId, + texture: &Arc>, selector: Option, new_state: TextureUses, ) -> Result<(), UsageConflict> { - let index = id.unzip().0 as usize; - let resource = storage.get(id).unwrap(); + let index = texture.as_info().id().unzip().0 as usize; self.tracker_assert_in_bounds(index); - let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; + let texture_selector = &texture.full_range; unsafe { insert_or_merge( texture_selector, @@ -370,7 +358,7 @@ impl TextureUsageScope { index, TextureStateProvider::from_option(selector, new_state), ResourceMetadataProvider::Direct { - resource: Cow::Borrowed(resource), + resource: Cow::Borrowed(texture), }, )? }; @@ -450,8 +438,12 @@ impl TextureTracker { } /// Drains all currently pending transitions. - pub fn drain(&mut self) -> Drain> { - self.temp.drain(..) + pub fn drain_transitions(&mut self) -> impl Iterator> { + let texture_barriers = self.temp.drain(..).map(|pending| { + let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; + pending.into_hal(tex) + }); + texture_barriers } /// Inserts a single texture and a state into the resource tracker. @@ -499,11 +491,10 @@ impl TextureTracker { pub fn set_single( &mut self, texture: &Arc>, - id: TextureId, selector: TextureSelector, new_state: TextureUses, ) -> Option>> { - let index = id.unzip().0 as usize; + let index = texture.as_info().id().unzip().0 as usize; self.allow_index(index); @@ -535,11 +526,11 @@ impl TextureTracker { /// /// If a transition is needed to get the texture into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn set_from_tracker(&mut self, storage: &Storage, TextureId>, tracker: &Self) { + pub fn set_from_tracker(&mut self, tracker: &Self) { let incoming_size = tracker.start_set.simple.len(); if incoming_size > self.start_set.simple.len() { self.set_size(incoming_size); @@ -549,7 +540,7 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); tracker.tracker_assert_in_bounds(index); unsafe { - let texture_selector = texture_selector_from_texture(storage, index); + let texture_selector = &tracker.metadata.get_resource_unchecked(index).full_range; insert_or_barrier_update( texture_selector, Some(&mut self.start_set), @@ -575,15 +566,11 @@ impl TextureTracker { /// /// If a transition is needed to get the textures into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
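Note: the selector lookups in these hunks no longer need the texture `Storage` because the tracker now holds `Arc<Texture>` directly and the texture carries its own `full_range`. A small illustrative model of that change, with the types reduced to the fields that matter here:

```rust
use std::sync::Arc;

// Reduced, illustrative types: the real `Texture` also carries id/info data,
// and `TextureSelector` holds mip and layer ranges.
struct TextureSelector;
struct Texture {
    full_range: TextureSelector,
}

// Before: the caller passed `storage` plus a `TextureId` and the selector was
// looked up by index. Now the `Arc` itself is the argument and the selector
// comes straight from it.
fn selector_of(texture: &Arc<Texture>) -> &TextureSelector {
    &texture.full_range
}
```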
- pub fn set_from_usage_scope( - &mut self, - storage: &Storage, TextureId>, - scope: &TextureUsageScope, - ) { + pub fn set_from_usage_scope(&mut self, scope: &TextureUsageScope) { let incoming_size = scope.set.simple.len(); if incoming_size > self.start_set.simple.len() { self.set_size(incoming_size); @@ -593,7 +580,7 @@ impl TextureTracker { self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); unsafe { - let texture_selector = texture_selector_from_texture(storage, index); + let texture_selector = &scope.metadata.get_resource_unchecked(index).full_range; insert_or_barrier_update( texture_selector, Some(&mut self.start_set), @@ -617,7 +604,7 @@ impl TextureTracker { /// /// If a transition is needed to get the textures into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// This is a really funky method used by Compute Passes to generate /// barriers after a call to dispatch without needing to iterate @@ -631,7 +618,6 @@ impl TextureTracker { /// method is called. pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - storage: &Storage, TextureId>, scope: &mut TextureUsageScope, bind_group_state: &TextureBindGroupState, ) { @@ -641,13 +627,13 @@ impl TextureTracker { } for t in bind_group_state.textures.iter() { - let index = t.id.unzip().0 as usize; + let index = t.texture.as_info().id().unzip().0 as usize; scope.tracker_assert_in_bounds(index); if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - let texture_selector = unsafe { texture_selector_from_texture(storage, index) }; + let texture_selector = &t.texture.full_range; unsafe { insert_or_barrier_update( texture_selector, @@ -702,7 +688,7 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. - pub fn remove_abandoned(&mut self, id: TextureId) -> bool { + pub fn remove_abandoned(&mut self, id: TextureId, is_in_registry: bool) -> bool { let index = id.unzip().0 as usize; if index > self.metadata.size() { @@ -714,8 +700,10 @@ impl TextureTracker { unsafe { if self.metadata.contains_unchecked(index) { let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //3 ref count: Registry, Device Tracker and suspected resource itself - if existing_ref_count <= 3 { + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = if is_in_registry { 3 } else { 2 }; + if existing_ref_count <= min_ref_count { self.start_set.complex.remove(&index); self.end_set.complex.remove(&index); self.metadata.remove(index); @@ -835,17 +823,6 @@ impl<'a> TextureStateProvider<'a> { } } -/// Helper function that gets what is needed from the texture storage -/// out of the texture storage. -#[inline(always)] -unsafe fn texture_selector_from_texture( - storage: &Storage, TextureId>, - index: usize, -) -> &TextureSelector { - let texture = unsafe { storage.get_unchecked(index as _) }; - &texture.full_range -} - /// Does an insertion operation if the index isn't tracked /// in the current metadata, otherwise merges the given state /// with the current state. 
If the merging would cause From 490b2b7ca02c805a3941abeae1c742f30c3de954 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 00:36:48 +0200 Subject: [PATCH 093/132] Fixing integration issues and merge conflicts --- tests/src/lib.rs | 3 +-- wgpu-core/src/device/global.rs | 6 ++++-- wgpu-core/src/present.rs | 8 ++++---- wgpu-core/src/resource.rs | 13 ++++++------- wgpu-hal/examples/halmark/main.rs | 2 +- wgpu-hal/src/vulkan/instance.rs | 3 +-- 6 files changed, 17 insertions(+), 18 deletions(-) diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 030224fabd..06352e4e97 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -405,7 +405,6 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te return; } - // Determine if we expect this test to fail, and if so, why. let expected_failure_reason = parameters .failures @@ -482,7 +481,7 @@ pub fn initialize_test(parameters: TestParameters, test_function: impl FnOnce(Te } } -fn initialize_adapter() -> (Adapter, Option) { +fn initialize_adapter() -> (Instance, Adapter, Option) { let instance = initialize_instance(); let surface_guard: Option; let compatible_surface; diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 260177b833..bc84865d40 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1936,12 +1936,14 @@ impl Global { } // Wait for all work to finish before configuring the surface. - if let Err(e) = device.maintain(hub, wgt::Maintain::Wait, &mut token) { + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + if let Err(e) = device.maintain(hub, fence, wgt::Maintain::Wait) { break e.into(); } // All textures must be destroyed before the surface can be re-configured. - if let Some(present) = surface.presentation.take() { + if let Some(present) = surface.presentation.lock().take() { if present.acquired_texture.is_some() { break E::PreviousOutputExists; } diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index ed7e20a7ea..4a88b05753 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -311,7 +311,7 @@ impl Global { let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { if let Ok(mut texture) = Arc::try_unwrap(texture) { - texture.clear_mode.destroy_clear_views(device.raw()); + texture.clear_mode.write().destroy_clear_views(device.raw()); let suf = A::get_surface(&surface); match texture.inner.take().unwrap() { @@ -396,15 +396,15 @@ impl Global { // and now we are moving it away. log::debug!( "Removing swapchain texture {:?} from the device tracker", - texture_id.value + texture_id ); device.trackers.lock().textures.remove(texture_id); let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { if let Ok(mut texture) = Arc::try_unwrap(texture) { - texture.clear_mode.destroy_clear_views(device.raw()); - + texture.clear_mode.write().destroy_clear_views(device.raw()); + let suf = A::get_surface(&surface); match texture.inner.take().unwrap() { resource::TextureInner::Surface { diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 01872cd928..82dd9600b5 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -720,13 +720,12 @@ pub enum TextureClearMode { None, } -impl TextureClearMode { - pub(crate) fn destroy_clear_views(self, device: &A::Device) { - if let TextureClearMode::RenderPass { clear_views, .. 
} = self { - for clear_view in clear_views { - unsafe { - hal::Device::destroy_texture_view(device, clear_view); - } +impl TextureClearMode { + pub(crate) fn destroy_clear_views(&mut self, device: &A::Device) { + if let TextureClearMode::Surface { ref mut clear_view } = *self { + unsafe { + let view = clear_view.take().unwrap(); + hal::Device::destroy_texture_view(device, view); } } } diff --git a/wgpu-hal/examples/halmark/main.rs b/wgpu-hal/examples/halmark/main.rs index 20e646863d..e716ab0d93 100644 --- a/wgpu-hal/examples/halmark/main.rs +++ b/wgpu-hal/examples/halmark/main.rs @@ -113,7 +113,7 @@ impl Example { let exposed = adapters.swap_remove(0); (exposed.adapter, exposed.capabilities) }; - + let surface_caps = unsafe { adapter.surface_capabilities(&surface) } .ok_or("failed to get surface capabilities")?; log::info!("Surface caps: {:#?}", surface_caps); diff --git a/wgpu-hal/src/vulkan/instance.rs b/wgpu-hal/src/vulkan/instance.rs index 97ec8fd624..77d696f268 100644 --- a/wgpu-hal/src/vulkan/instance.rs +++ b/wgpu-hal/src/vulkan/instance.rs @@ -570,7 +570,7 @@ impl crate::Instance for super::Instance { let entry = unsafe { ash::Entry::load() }.map_err(|err| { crate::InstanceError::with_source(String::from("missing Vulkan entry points"), err) })?; - + let driver_api_version = match entry.try_enumerate_instance_version() { // Vulkan 1.1+ Ok(Some(version)) => version, @@ -830,7 +830,6 @@ impl crate::Surface for super::Surface { device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { - // Safety: `configure`'s contract guarantees there are no resources derived from the swapchain in use. let mut swap_chain = self.swapchain.write(); let old = swap_chain From 451dfba55890769e48d4b25abdb66af8ce388b39 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 00:46:33 +0200 Subject: [PATCH 094/132] Fixing clippy wasm32 --- wgpu-core/src/device/queue.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 46908b9f27..3a0fe9e50d 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -944,7 +944,6 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let texture_guard = hub.textures.read(); let dst = hub.textures.get(destination.texture).unwrap(); if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { @@ -1045,8 +1044,7 @@ impl Global { { let mut trackers = device.trackers.lock(); crate::command::clear_texture( - &*texture_guard, - destination.texture, + &dst, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, @@ -1090,7 +1088,6 @@ impl Global { .textures .set_single( &dst, - destination.texture, selector, hal::TextureUses::COPY_DST, ) From 6839dbab65a18765d078c6fe3e154ead75294436 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 00:51:49 +0200 Subject: [PATCH 095/132] Fixing mem_leaks on wasm --- tests/tests/mem_leaks.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 2db005c4e0..0293e93d4f 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -1,9 +1,4 @@ -use std::num::NonZeroU64; -use wasm_bindgen_test::*; -use wgpu::util::DeviceExt; - -use wgpu_test::{initialize_test, TestParameters, TestingContext}; #[cfg(any( not(target_arch = "wasm32"), @@ -11,10 +6,14 @@ use wgpu_test::{initialize_test, TestParameters, 
TestingContext}; feature = "webgl" ))] fn draw_test_with_reports( - ctx: TestingContext, + ctx: wgpu_test::TestingContext, expected: &[u32], function: impl FnOnce(&mut wgpu::RenderPass<'_>), ) { + use std::num::NonZeroU64; + + use wgpu::util::DeviceExt; + let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.adapters.num_allocated, 1); @@ -234,13 +233,14 @@ fn draw_test_with_reports( } #[test] -#[wasm_bindgen_test] #[cfg(any( not(target_arch = "wasm32"), target_os = "emscripten", feature = "webgl" ))] fn simple_draw_leaks() { + use wgpu_test::{initialize_test, TestParameters}; + initialize_test(TestParameters::default().test_features_limits(), |ctx| { draw_test_with_reports(ctx, &[0, 1, 2, 3, 4, 5], |cmb| { cmb.draw(0..6, 0..1); From 058c8af70b42a3a62b7b1bf0570703b83a0d4a72 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 01:07:35 +0200 Subject: [PATCH 096/132] Updating doc --- wgpu-core/src/hub.rs | 55 ++++++--------------------------------- wgpu-core/src/identity.rs | 10 ++++--- 2 files changed, 14 insertions(+), 51 deletions(-) diff --git a/wgpu-core/src/hub.rs b/wgpu-core/src/hub.rs index 62cdfc2f0e..6771f6380d 100644 --- a/wgpu-core/src/hub.rs +++ b/wgpu-core/src/hub.rs @@ -57,54 +57,15 @@ itself to choose ids always pass `()`. In either case, the id ultimately assigned is returned as the first element of the tuple. Producing true identifiers from `id_in` values is the job of an -[`crate::identity::IdentityHandler`] implementation, which has an associated type -[`Input`] saying what type of `id_in` values it accepts, and a -[`process`] method that turns such values into true identifiers of -type `I`. There are two kinds of `IdentityHandler`s: - -- Users that want `wgpu_core` to assign ids generally use - [`crate::identity::IdentityManager`] ([wrapped in a mutex]). Its `Input` type is - `()`, and it tracks assigned ids and generation numbers as - necessary. (This is what `wgpu` does.) - -- Users that want to assign ids themselves use an `IdentityHandler` - whose `Input` type is `I` itself, and whose `process` method simply - passes the `id_in` argument through unchanged. For example, the - `player` crate uses an `IdentityPassThrough` type whose `process` - method simply adjusts the id's backend (since recordings can be - replayed on a different backend than the one they were created on) - but passes the rest of the id's content through unchanged. - -Because an `IdentityHandler` can only create ids for a single -resource type `I`, constructing a [`crate::global::Global`] entails constructing a -separate `IdentityHandler` for each resource type `I` that the -`Global` will manage: an `IdentityHandler`, an -`IdentityHandler`, and so on. - -The [`crate::global::Global::new`] function could simply take a large collection of -`IdentityHandler` implementations as arguments, but that would be -ungainly. Instead, `Global::new` expects a `factory` argument that +[`crate::identity::IdentityManager`], but only if the `IdentityHandlerFactory` +create it and then generated by it, otherwise ids will be received from outside. + +`Global::new` expects a `factory` argument that implements the [`GlobalIdentityHandlerFactory`] trait, which extends [`crate::identity::IdentityHandlerFactory`] for each resource id type `I`. This trait, in turn, has a `spawn` method that constructs an -`IdentityHandler` for the `Global` to use. 
- -What this means is that the types of resource creation functions' -`id_in` arguments depend on the `Global`'s `G` type parameter. A -`Global`'s `IdentityHandler` implementation is: - -```ignore ->::Filter -``` - -where `Filter` is an associated type of the `IdentityHandlerFactory` trait. -Thus, its `id_in` type is: - -```ignore -<>::Filter as IdentityHandler>::Input -``` - -The [`crate::identity::Input`] type is an alias for this construction. +`crate::identity::IdentityManager` for the `Global` to use, +if ids should be generated by wgpu or will return None otherwise. ## Id allocation and streaming @@ -141,8 +102,8 @@ as much, allowing subsequent operations using that id to be properly flagged as errors as well. [`gfx_select`]: crate::gfx_select -[`Input`]: crate::identity::IdentityHandler::Input -[`process`]: crate::identity::IdentityHandler::process +[`Input`]: crate::identity::IdentityHandlerFactory::Input +[`process`]: crate::identity::IdentityManager::process [`Id`]: crate::id::Id [wrapped in a mutex]: trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E [WebGPU]: https://www.w3.org/TR/webgpu/ diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index 54facc7410..b6c4b35d1c 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -110,15 +110,17 @@ impl IdentityManager { } } -/// A type that can produce [`IdentityHandler`] filters for ids of type `I`. +/// A type that can produce [`IdentityManager`] filters for ids of type `I`. /// /// See the module-level documentation for details. pub trait IdentityHandlerFactory { type Input: Copy; - /// Create an [`IdentityHandler`] implementation that can + /// Create an [`IdentityManager`] implementation that can /// transform proto-ids into ids of type `I`. + /// It can return None if ids are passed from outside + /// and are not generated by wgpu /// - /// [`IdentityHandler`]: IdentityHandler + /// [`IdentityManager`]: IdentityManager fn spawn(&self) -> Option>>; fn input_to_id(id_in: Self::Input) -> I; @@ -144,7 +146,7 @@ impl IdentityHandlerFactory for IdentityManagerFactory { } } -/// A factory that can build [`IdentityHandler`]s for all resource +/// A factory that can build [`IdentityManager`]s for all resource /// types. 
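Note: the `spawn` contract described above distinguishes the two id styles: `Some(IdentityManager)` when wgpu generates ids itself, `None` when ids come from the caller. A minimal sketch of both factory styles; the names here are illustrative, not the exact wgpu-core items:

```rust
use std::marker::PhantomData;

// Illustrative manager; the real one hands out index/epoch pairs.
struct IdManager<I>(PhantomData<I>);

trait IdFactory<I: Copy> {
    type Input: Copy;
    /// Some(..) => wgpu generates ids itself; None => ids are passed in.
    fn spawn(&self) -> Option<IdManager<I>>;
    fn input_to_id(input: Self::Input) -> I;
}

/// Pass-through style, as used by the `player` crate: the input already is the id.
struct PassThrough;

impl<I: Copy> IdFactory<I> for PassThrough {
    type Input = I;
    fn spawn(&self) -> Option<IdManager<I>> {
        None
    }
    fn input_to_id(input: I) -> I {
        input
    }
}
```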
pub trait GlobalIdentityHandlerFactory: IdentityHandlerFactory From 6a667468ff81f1776bd49fdc8ac73af13972f6a9 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 07:38:35 +0200 Subject: [PATCH 097/132] Fix format --- wgpu-core/src/device/queue.rs | 6 +----- wgpu-core/src/identity.rs | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 3a0fe9e50d..e450cbe090 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1086,11 +1086,7 @@ impl Global { let mut trackers = device.trackers.lock(); let transitions = trackers .textures - .set_single( - &dst, - selector, - hal::TextureUses::COPY_DST, - ) + .set_single(&dst, selector, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; encoder.transition_textures(transitions.map(|pending| pending.into_hal(&dst))); encoder.copy_external_image_to_texture( diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index b6c4b35d1c..e5a8b79665 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -117,7 +117,7 @@ pub trait IdentityHandlerFactory { type Input: Copy; /// Create an [`IdentityManager`] implementation that can /// transform proto-ids into ids of type `I`. - /// It can return None if ids are passed from outside + /// It can return None if ids are passed from outside /// and are not generated by wgpu /// /// [`IdentityManager`]: IdentityManager From f83fdad4bb3e974b90f2f449e4d12e7513a2acdb Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 9 Sep 2023 07:40:17 +0200 Subject: [PATCH 098/132] Fix fmt --- tests/tests/mem_leaks.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 0293e93d4f..798a8ff1ca 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -1,5 +1,3 @@ - - #[cfg(any( not(target_arch = "wasm32"), target_os = "emscripten", From c5aa05f674ccc1a1fc88d1b8458f87c8e95eade3 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 10 Sep 2023 13:43:22 +0200 Subject: [PATCH 099/132] Improving resource dropping in device poll --- tests/tests/mem_leaks.rs | 39 +++++- wgpu-core/src/device/life.rs | 230 +++++++++++++++++++++++++------ wgpu-core/src/device/resource.rs | 6 + wgpu-core/src/pipeline.rs | 2 + 4 files changed, 229 insertions(+), 48 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 798a8ff1ca..2e364a850f 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -86,6 +86,7 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); + assert_eq!(report.buffers.num_allocated, 1); assert_eq!(report.pipeline_layouts.num_allocated, 1); assert_eq!(report.render_pipelines.num_allocated, 0); assert_eq!(report.compute_pipelines.num_allocated, 0); @@ -120,10 +121,18 @@ fn draw_test_with_reports( assert_eq!(report.buffers.num_allocated, 1); assert_eq!(report.bind_groups.num_allocated, 1); assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.shader_modules.num_allocated, 1); assert_eq!(report.pipeline_layouts.num_allocated, 1); assert_eq!(report.render_pipelines.num_allocated, 1); assert_eq!(report.compute_pipelines.num_allocated, 0); + drop(shader); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + assert_eq!(report.shader_modules.num_allocated, 1); + assert_eq!(report.shader_modules.num_kept_from_user, 0); + let texture = 
ctx.device.create_texture_with_data( &ctx.queue, &wgpu::TextureDescriptor { @@ -156,6 +165,7 @@ fn draw_test_with_reports( let report = global_report.hub_report(); assert_eq!(report.buffers.num_allocated, 1); assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.texture_views.num_kept_from_user, 1); assert_eq!(report.textures.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); @@ -201,12 +211,22 @@ fn draw_test_with_reports( drop(rpass); drop(pipeline); drop(texture_view); + drop(ppl); + drop(bgl); + drop(bg); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.command_buffers.num_allocated, 1); + assert_eq!(report.command_buffers.num_kept_from_user, 1); assert_eq!(report.render_pipelines.num_allocated, 1); assert_eq!(report.render_pipelines.num_kept_from_user, 0); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.pipeline_layouts.num_kept_from_user, 0); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_kept_from_user, 0); + assert_eq!(report.bind_groups.num_allocated, 1); + assert_eq!(report.bind_groups.num_kept_from_user, 0); assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.texture_views.num_kept_from_user, 0); assert_eq!(report.textures.num_allocated, 1); @@ -222,12 +242,23 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); + assert_eq!(report.render_pipelines.num_allocated, 0); - assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.bind_groups.num_allocated, 1); - assert_eq!(report.bind_group_layouts.num_allocated, 1); - assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 0); + assert_eq!(report.bind_group_layouts.num_allocated, 0); + assert_eq!(report.pipeline_layouts.num_allocated, 0); assert_eq!(report.texture_views.num_allocated, 0); + assert_eq!(report.textures.num_allocated, 1); + assert_eq!(report.buffers.num_allocated, 1); + + drop(ctx.queue); + drop(ctx.device); + + let global_report = ctx.instance.generate_report(); + let report = global_report.hub_report(); + + assert_eq!(report.devices.num_kept_from_user, 0); + assert_eq!(report.queues.num_kept_from_user, 0); } #[test] diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index a3dd69cf66..8559c60369 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -523,10 +523,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::BindGroupId), { + let mut submit_indices = Vec::new(); self.suspected_resources .bind_groups .retain(|&bind_group_id, bind_group| { @@ -566,6 +567,9 @@ impl LifetimeTracker { .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); let submit_index = bind_group.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -575,7 +579,7 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } fn triage_suspected_texture_views( @@ -583,10 +587,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::TextureViewId), { + let mut submit_indices = Vec::new(); self.suspected_resources .texture_views .retain(|&view_id, view| { @@ -606,6 +611,9 @@ impl LifetimeTracker { 
.insert(parent_texture.as_info().id(), parent_texture.clone()); } let submit_index = view.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -615,7 +623,7 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } fn triage_suspected_textures( @@ -667,10 +675,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::SamplerId), { + let mut submit_indices = Vec::new(); self.suspected_resources .samplers .retain(|&sampler_id, sampler| { @@ -685,6 +694,9 @@ impl LifetimeTracker { f(&sampler_id); let submit_index = sampler.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -694,7 +706,7 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } fn triage_suspected_buffers( @@ -702,10 +714,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::BufferId), { + let mut submit_indices = Vec::new(); self.suspected_resources .buffers .retain(|&buffer_id, buffer| { @@ -720,6 +733,9 @@ impl LifetimeTracker { f(&buffer_id); let submit_index = buffer.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } if let resource::BufferMapState::Init { ref stage_buffer, .. } = *buffer.map_state.lock() @@ -735,7 +751,7 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } fn triage_suspected_compute_pipelines( @@ -743,10 +759,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::ComputePipelineId), { + let mut submit_indices = Vec::new(); self.suspected_resources.compute_pipelines.retain( |&compute_pipeline_id, compute_pipeline| { let is_removed = { @@ -763,7 +780,15 @@ impl LifetimeTracker { ); f(&compute_pipeline_id); + self.suspected_resources.pipeline_layouts.insert( + compute_pipeline.layout.as_info().id(), + compute_pipeline.layout.clone(), + ); + let submit_index = compute_pipeline.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -774,7 +799,7 @@ impl LifetimeTracker { !is_removed }, ); - self + submit_indices } fn triage_suspected_render_pipelines( @@ -782,10 +807,11 @@ impl LifetimeTracker { hub: &Hub, trackers: &Mutex>, mut f: F, - ) -> &mut Self + ) -> Vec where F: FnMut(&id::RenderPipelineId), { + let mut submit_indices = Vec::new(); self.suspected_resources .render_pipelines .retain(|&render_pipeline_id, render_pipeline| { @@ -803,7 +829,15 @@ impl LifetimeTracker { ); f(&render_pipeline_id); + self.suspected_resources.pipeline_layouts.insert( + render_pipeline.layout.as_info().id(), + render_pipeline.layout.clone(), + ); + let submit_index = render_pipeline.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -813,10 +847,14 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } - fn triage_suspected_pipeline_layouts(&mut self, mut f: F) -> &mut Self + fn triage_suspected_pipeline_layouts( + &mut self, + pipeline_submit_indices: &[u64], + mut f: F, + ) -> &mut Self where F: FnMut(&id::PipelineLayoutId), { @@ -824,7 +862,33 
@@ impl LifetimeTracker { .pipeline_layouts .retain(|pipeline_layout_id, pipeline_layout| { //Note: this has to happen after all the suspected pipelines are destroyed - if pipeline_layout.is_unique() { + + let mut num_ref_in_nonreferenced_resources = 0; + pipeline_submit_indices.iter().for_each(|submit_index| { + let resources = self + .active + .iter() + .find(|a| a.index == *submit_index) + .map_or(&self.free_resources, |a| &a.last_resources); + + resources.compute_pipes.iter().for_each(|p| { + if p.layout.as_info().id() == *pipeline_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + resources.render_pipes.iter().for_each(|p| { + if p.layout.as_info().id() == *pipeline_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + }); + + if pipeline_layout.ref_count() == (1 + num_ref_in_nonreferenced_resources) { + log::debug!( + "PipelineLayout {:?} is not tracked anymore", + pipeline_layout_id + ); + f(pipeline_layout_id); for bgl in &pipeline_layout.bind_group_layouts { @@ -837,13 +901,24 @@ impl LifetimeTracker { .push(pipeline_layout.clone()); return false; + } else { + log::info!( + "PipelineLayout {:?} is still referenced from {}", + pipeline_layout_id, + pipeline_layout.ref_count() + ); } true }); self } - fn triage_suspected_bind_group_layouts(&mut self, mut f: F) -> &mut Self + fn triage_suspected_bind_group_layouts( + &mut self, + bind_group_submit_indices: &[u64], + pipeline_submit_indices: &[u64], + mut f: F, + ) -> &mut Self where F: FnMut(&id::BindGroupLayoutId), { @@ -853,15 +928,63 @@ impl LifetimeTracker { //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. + let mut num_ref_in_nonreferenced_resources = 0; + bind_group_submit_indices.iter().for_each(|submit_index| { + let resources = self + .active + .iter() + .find(|a| a.index == *submit_index) + .map_or(&self.free_resources, |a| &a.last_resources); + + resources.bind_groups.iter().for_each(|b| { + if b.layout.as_info().id() == *bind_group_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + resources.bind_group_layouts.iter().for_each(|b| { + if b.as_info().id() == *bind_group_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + }); + pipeline_submit_indices.iter().for_each(|submit_index| { + let resources = self + .active + .iter() + .find(|a| a.index == *submit_index) + .map_or(&self.free_resources, |a| &a.last_resources); + + resources.compute_pipes.iter().for_each(|p| { + p.layout.bind_group_layouts.iter().for_each(|b| { + if b.as_info().id() == *bind_group_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + }); + resources.render_pipes.iter().for_each(|p| { + p.layout.bind_group_layouts.iter().for_each(|b| { + if b.as_info().id() == *bind_group_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + }); + resources.pipeline_layouts.iter().for_each(|p| { + p.bind_group_layouts.iter().for_each(|b| { + if b.as_info().id() == *bind_group_layout_id { + num_ref_in_nonreferenced_resources += 1; + } + }); + }); + }); //Note: this has to happen after all the suspected pipelines are destroyed - if bind_group_layout.is_unique() { + if bind_group_layout.ref_count() == (1 + num_ref_in_nonreferenced_resources) { // If This layout points to a compatible one, go over the latter // to decrement the ref count and potentially destroy it. 
//bgl_to_check = bind_group_layout.compatible_layout; log::debug!( - "BindGroupLayout {:?} will be removed from registry", + "BindGroupLayout {:?} is not tracked anymore", bind_group_layout_id ); f(bind_group_layout_id); @@ -871,6 +994,12 @@ impl LifetimeTracker { .push(bind_group_layout.clone()); return false; + } else { + log::info!( + "BindGroupLayout {:?} is still referenced from {}", + bind_group_layout_id, + bind_group_layout.ref_count() + ); } true }, @@ -882,7 +1011,8 @@ impl LifetimeTracker { &mut self, hub: &Hub, trackers: &Mutex>, - ) -> &mut Self { + ) -> Vec { + let mut submit_indices = Vec::new(); self.suspected_resources .query_sets .retain(|&query_set_id, query_set| { @@ -898,6 +1028,9 @@ impl LifetimeTracker { // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); let submit_index = query_set.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } self.active .iter_mut() .find(|a| a.index == submit_index) @@ -907,7 +1040,7 @@ impl LifetimeTracker { } !is_removed }); - self + submit_indices } /// Identify resources to free, according to `trackers` and `self.suspected_resources`. @@ -963,24 +1096,45 @@ impl LifetimeTracker { t.add(trace::Action::DestroyRenderBundle(*_id)); } }); - self.triage_suspected_bind_groups(hub, trackers, |_id| { + let compute_pipeline_indices = + self.triage_suspected_compute_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyComputePipeline(*_id)); + } + }); + let render_pipeline_indices = + self.triage_suspected_render_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderPipeline(*_id)); + } + }); + let mut pipeline_submit_indices = Vec::new(); + pipeline_submit_indices.extend(compute_pipeline_indices); + pipeline_submit_indices.extend(render_pipeline_indices); + let bind_group_submit_indices = self.triage_suspected_bind_groups(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroup(*_id)); } }); - self.triage_suspected_texture_views(hub, trackers, |_id| { + self.triage_suspected_pipeline_layouts(&pipeline_submit_indices, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTextureView(*_id)); - } - }); - self.triage_suspected_textures(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTexture(*_id)); + t.add(trace::Action::DestroyPipelineLayout(*_id)); } }); + self.triage_suspected_bind_group_layouts( + &bind_group_submit_indices, + &pipeline_submit_indices, + |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroupLayout(*_id)); + } + }, + ); self.triage_suspected_samplers(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -993,28 +1147,16 @@ impl LifetimeTracker { t.add(trace::Action::DestroyBuffer(*_id)); } }); - self.triage_suspected_compute_pipelines(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyComputePipeline(*_id)); - } - }); - self.triage_suspected_render_pipelines(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderPipeline(*_id)); - } - }); - self.triage_suspected_pipeline_layouts(|_id| { + 
self.triage_suspected_texture_views(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyPipelineLayout(*_id)); + t.add(trace::Action::DestroyTextureView(*_id)); } }); - self.triage_suspected_bind_group_layouts(|_id| { + self.triage_suspected_textures(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroupLayout(*_id)); + t.add(trace::Action::DestroyTexture(*_id)); } }); self.triage_suspected_query_sets(hub, trackers); diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 3b8d51cda5..c95c7a8274 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -2457,6 +2457,7 @@ impl Device { raw: Some(raw), layout: layout.clone(), device: self.clone(), + _shader_module: shader_module, late_sized_buffer_groups, info: ResourceInfo::new(desc.label.borrow_or_default()), }; @@ -2472,6 +2473,8 @@ impl Device { ) -> Result, pipeline::CreateRenderPipelineError> { use wgt::TextureFormatFeatureFlags as Tfff; + let mut shader_modules = Vec::new(); + // This has to be done first, or otherwise the IDs may be pointing to entries // that are not even in the storage. if let Some(ref ids) = implicit_context { @@ -2739,6 +2742,7 @@ impl Device { error: validation::StageError::InvalidModule, } })?; + shader_modules.push(shader_module.clone()); let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { @@ -2788,6 +2792,7 @@ impl Device { stage: flag, error: validation::StageError::InvalidModule, })?; + shader_modules.push(shader_module.clone()); let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { @@ -2976,6 +2981,7 @@ impl Device { layout: layout.clone(), device: self.clone(), pass_context, + _shader_modules: shader_modules, flags, strip_index_format: desc.primitive.strip_index_format, vertex_steps, diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index 2ec01dc287..d62e32f8b7 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -246,6 +246,7 @@ pub struct ComputePipeline { pub(crate) raw: Option, pub(crate) layout: Arc>, pub(crate) device: Arc>, + pub(crate) _shader_module: Arc>, pub(crate) late_sized_buffer_groups: ArrayVec, pub(crate) info: ResourceInfo, } @@ -473,6 +474,7 @@ pub struct RenderPipeline { pub(crate) raw: Option, pub(crate) device: Arc>, pub(crate) layout: Arc>, + pub(crate) _shader_modules: Vec>>, pub(crate) pass_context: RenderPassContext, pub(crate) flags: PipelineFlags, pub(crate) strip_index_format: Option, From 9a24345d12354b4b9a2eb1588410eded0c683c5a Mon Sep 17 00:00:00 2001 From: gents83 Date: Wed, 13 Sep 2023 08:11:24 +0200 Subject: [PATCH 100/132] Add release of buffer and description on texture --- tests/tests/mem_leaks.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 2e364a850f..05a7026a40 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -214,6 +214,7 @@ fn draw_test_with_reports( drop(ppl); drop(bgl); drop(bg); + drop(buffer); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); @@ -229,6 +230,8 @@ fn draw_test_with_reports( assert_eq!(report.bind_groups.num_kept_from_user, 0); assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.texture_views.num_kept_from_user, 0); + 
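Note: the `_shader_module` / `_shader_modules` fields added in this patch exist purely to extend lifetimes. A pipeline keeps an `Arc` to every module it was built from, so the module cannot be freed while the pipeline is alive even after the user drops their handle, which is what the `num_allocated, 1` / `num_kept_from_user, 0` assertions check. A reduced sketch of the pattern:

```rust
use std::sync::Arc;

// Reduced types, for illustration only.
struct ShaderModule;

struct ComputePipeline {
    // Never read; held only so the module outlives the pipeline.
    _shader_module: Arc<ShaderModule>,
}

fn create_pipeline(module: &Arc<ShaderModule>) -> ComputePipeline {
    ComputePipeline {
        _shader_module: Arc::clone(module),
    }
}
```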
assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.buffers.num_kept_from_user, 0); assert_eq!(report.textures.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); @@ -248,17 +251,24 @@ fn draw_test_with_reports( assert_eq!(report.bind_group_layouts.num_allocated, 0); assert_eq!(report.pipeline_layouts.num_allocated, 0); assert_eq!(report.texture_views.num_allocated, 0); - assert_eq!(report.textures.num_allocated, 1); - assert_eq!(report.buffers.num_allocated, 1); + assert_eq!(report.buffers.num_allocated, 0); drop(ctx.queue); drop(ctx.device); + drop(ctx.adapter); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); - assert_eq!(report.devices.num_kept_from_user, 0); assert_eq!(report.queues.num_kept_from_user, 0); + assert_eq!(report.queues.num_allocated, 0); + //Still one texture alive because surface is not dropped till the end + assert_eq!(report.textures.num_allocated, 1); + //that is keeping still the device alive + assert_eq!(report.devices.num_allocated, 1); + assert_eq!(report.textures.num_kept_from_user, 0); + assert_eq!(report.devices.num_kept_from_user, 0); + } #[test] From e52c747a01ae9fe3e36a483ff2ce52df73a6e24d Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 16 Sep 2023 09:52:56 +0200 Subject: [PATCH 101/132] Fix format --- tests/tests/mem_leaks.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 05a7026a40..7cf167288a 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -268,7 +268,6 @@ fn draw_test_with_reports( assert_eq!(report.devices.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); assert_eq!(report.devices.num_kept_from_user, 0); - } #[test] @@ -277,7 +276,7 @@ fn draw_test_with_reports( target_os = "emscripten", feature = "webgl" ))] -fn simple_draw_leaks() { +fn simple_draw_check_mem_leaks() { use wgpu_test::{initialize_test, TestParameters}; initialize_test(TestParameters::default().test_features_limits(), |ctx| { From 45a618f8782e1a0aba7c7b168ce6035c0e4bb2c0 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 16 Sep 2023 20:00:21 +0200 Subject: [PATCH 102/132] Fix merge conflicts and improve id mngmnt --- player/src/lib.rs | 11 +- tests/tests/device.rs | 2 +- tests/tests/mem_leaks.rs | 13 +- wgpu-core/src/device/global.rs | 220 +++++++++++++++-------------- wgpu-core/src/device/life.rs | 233 ++++++++++--------------------- wgpu-core/src/device/queue.rs | 91 ++++++------ wgpu-core/src/device/resource.rs | 23 ++- wgpu-core/src/identity.rs | 84 +++++------ wgpu-core/src/registry.rs | 38 +++-- wgpu-core/src/resource.rs | 4 +- 10 files changed, 335 insertions(+), 384 deletions(-) diff --git a/player/src/lib.rs b/player/src/lib.rs index 063c614bd1..ec8b91371a 100644 --- a/player/src/lib.rs +++ b/player/src/lib.rs @@ -8,21 +8,22 @@ #![cfg(not(target_arch = "wasm32"))] #![warn(unsafe_op_in_unsafe_fn)] -use wgc::{device::trace, identity::IdentityManager}; +use wgc::device::trace; -use std::{borrow::Cow, fs, path::Path, sync::Arc}; +use std::{borrow::Cow, fs, path::Path}; pub struct IdentityPassThroughFactory; impl wgc::identity::IdentityHandlerFactory for IdentityPassThroughFactory { type Input = I; - fn spawn(&self) -> Option>> { - None - } fn input_to_id(id_in: Self::Input) -> I { id_in } + + fn autogenerate_ids() -> bool { + false + } } impl wgc::identity::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {} diff --git a/tests/tests/device.rs b/tests/tests/device.rs index 
28ba9465e8..1b8c4d765c 100644 --- a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -104,7 +104,7 @@ fn request_device_error_on_native() { async fn request_device_error_message() { // Not using initialize_test() because that doesn't let us catch the error // nor .await anything - let (adapter, _surface_guard) = wgpu_test::initialize_adapter(); + let (_instance, adapter, _surface_guard) = wgpu_test::initialize_adapter(); let device_error = adapter .request_device( diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 7cf167288a..dd1b93e7af 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -156,7 +156,8 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.texture_views.num_allocated, 1); + //1 is clear_view and 1 is user's texture_view + assert_eq!(report.texture_views.num_allocated, 2); assert_eq!(report.textures.num_allocated, 1); drop(texture); @@ -164,7 +165,7 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 2); assert_eq!(report.texture_views.num_kept_from_user, 1); assert_eq!(report.textures.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); @@ -203,7 +204,7 @@ fn draw_test_with_reports( assert_eq!(report.compute_pipelines.num_allocated, 0); assert_eq!(report.command_buffers.num_allocated, 1); assert_eq!(report.render_bundles.num_allocated, 0); - assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 2); assert_eq!(report.textures.num_allocated, 1); function(&mut rpass); @@ -228,7 +229,7 @@ fn draw_test_with_reports( assert_eq!(report.bind_group_layouts.num_kept_from_user, 0); assert_eq!(report.bind_groups.num_allocated, 1); assert_eq!(report.bind_groups.num_kept_from_user, 0); - assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 2); assert_eq!(report.texture_views.num_kept_from_user, 0); assert_eq!(report.buffers.num_allocated, 1); assert_eq!(report.buffers.num_kept_from_user, 0); @@ -250,8 +251,10 @@ fn draw_test_with_reports( assert_eq!(report.bind_groups.num_allocated, 0); assert_eq!(report.bind_group_layouts.num_allocated, 0); assert_eq!(report.pipeline_layouts.num_allocated, 0); - assert_eq!(report.texture_views.num_allocated, 0); assert_eq!(report.buffers.num_allocated, 0); + //surface is still there + assert_eq!(report.texture_views.num_allocated, 1); + assert_eq!(report.textures.num_allocated, 1); drop(ctx.queue); drop(ctx.device); diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index bc84865d40..39b351ae9b 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -22,7 +22,7 @@ use smallvec::SmallVec; use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, iter, ops::Range, ptr, sync::Arc}; +use std::{borrow::Cow, iter, ops::Range, ptr}; use super::{ImplicitPipelineIds, InvalidDevice, UserClosures}; @@ -132,122 +132,133 @@ impl Global { let hub = A::hub(self); let fid = hub.buffers.prepare::(id_in); - let error = loop { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), - }; + let device = match hub.devices.get(device_id) { + Ok(device) => 
device, + Err(_) => { + let id = fid.assign_error(desc.label.borrow_or_default()); + return (id, Some(DeviceError::Invalid.into())); + } + }; - if desc.usage.is_empty() { - // Per spec, `usage` must not be zero. - break resource::CreateBufferError::InvalidUsage(desc.usage); + if desc.usage.is_empty() { + // Per spec, `usage` must not be zero. + let id = fid.assign_error(desc.label.borrow_or_default()); + return ( + id, + Some(resource::CreateBufferError::InvalidUsage(desc.usage)), + ); + } + + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + let mut desc = desc.clone(); + let mapped_at_creation = std::mem::replace(&mut desc.mapped_at_creation, false); + if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + desc.usage |= wgt::BufferUsages::COPY_DST; } + trace.add(trace::Action::CreateBuffer(fid.id(), desc)); + } - #[cfg(feature = "trace")] - if let Some(ref mut trace) = *device.trace.lock() { - let mut desc = desc.clone(); - let mapped_at_creation = std::mem::replace(&mut desc.mapped_at_creation, false); - if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - desc.usage |= wgt::BufferUsages::COPY_DST; - } - trace.add(trace::Action::CreateBuffer(fid.id(), desc)); + let buffer = match device.create_buffer(device_id, desc, false) { + Ok(buffer) => buffer, + Err(e) => { + let id = fid.assign_error(desc.label.borrow_or_default()); + return (id, Some(e)); } + }; - let buffer = match device.create_buffer(device_id, desc, false) { - Ok(buffer) => buffer, - Err(e) => break e, - }; + let (id, resource) = fid.assign(buffer); + log::info!("Created Buffer {:?} with {:?}", id, desc); - let buffer_use = if !desc.mapped_at_creation { - hal::BufferUses::empty() - } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // buffer is mappable, so we are just doing that at start - let map_size = buffer.size; - let ptr = if map_size == 0 { - std::ptr::NonNull::dangling() - } else { - match map_buffer(device.raw(), &buffer, 0, map_size, HostMap::Write) { - Ok(ptr) => ptr, - Err(e) => { - device.lock_life().schedule_resource_destruction( - queue::TempResource::Buffer(Arc::new(buffer)), - !0, - ); - break e.into(); - } - } - }; - *buffer.map_state.lock() = resource::BufferMapState::Active { - ptr, - range: 0..map_size, - host: HostMap::Write, - }; - hal::BufferUses::MAP_WRITE + let buffer_use = if !desc.mapped_at_creation { + hal::BufferUses::empty() + } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { + // buffer is mappable, so we are just doing that at start + let map_size = resource.size; + let ptr = if map_size == 0 { + std::ptr::NonNull::dangling() } else { - // buffer needs staging area for initialization only - let stage_desc = wgt::BufferDescriptor { - label: Some(Cow::Borrowed( - "(wgpu internal) initializing unmappable buffer", - )), - size: desc.size, - usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, - mapped_at_creation: false, - }; - let stage = match device.create_buffer(device_id, &stage_desc, true) { - Ok(stage) => stage, + match map_buffer(device.raw(), &resource, 0, map_size, HostMap::Write) { + Ok(ptr) => ptr, Err(e) => { device.lock_life().schedule_resource_destruction( - queue::TempResource::Buffer(Arc::new(buffer)), + queue::TempResource::Buffer(resource), !0, ); - break e; + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(e.into())); } - }; - let mapping = match unsafe { device.raw().map_buffer(stage.raw(), 0..stage.size) } 
{ - Ok(mapping) => mapping, - Err(e) => { - let mut life_lock = device.lock_life(); - life_lock.schedule_resource_destruction( - queue::TempResource::Buffer(Arc::new(buffer)), - !0, - ); - life_lock.schedule_resource_destruction( - queue::TempResource::Buffer(Arc::new(stage)), - !0, - ); - break DeviceError::from(e).into(); - } - }; - - assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0); - // Zero initialize memory and then mark both staging and buffer as initialized - // (it's guaranteed that this is the case by the time the buffer is usable) - unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) }; - buffer.initialization_status.write().drain(0..buffer.size); - stage.initialization_status.write().drain(0..buffer.size); - - *buffer.map_state.lock() = resource::BufferMapState::Init { - ptr: mapping.ptr, - needs_flush: !mapping.is_coherent, - stage_buffer: Arc::new(stage), - }; - hal::BufferUses::COPY_DST + } }; + *resource.map_state.lock() = resource::BufferMapState::Active { + ptr, + range: 0..map_size, + host: HostMap::Write, + }; + hal::BufferUses::MAP_WRITE + } else { + // buffer needs staging area for initialization only + let stage_desc = wgt::BufferDescriptor { + label: Some(Cow::Borrowed( + "(wgpu internal) initializing unmappable buffer", + )), + size: desc.size, + usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, + mapped_at_creation: false, + }; + let stage = match device.create_buffer(device_id, &stage_desc, true) { + Ok(stage) => stage, + Err(e) => { + device + .lock_life() + .schedule_resource_destruction(queue::TempResource::Buffer(resource), !0); + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(e)); + } + }; + let stage_fid = hub.buffers.request::(); + let stage = stage_fid.init(stage); - let (id, resource) = fid.assign(buffer); - log::info!("Created Buffer {:?} with {:?}", id, desc); - - device - .trackers - .lock() - .buffers - .insert_single(id, resource, buffer_use); + let mapping = match unsafe { device.raw().map_buffer(stage.raw(), 0..stage.size) } { + Ok(mapping) => mapping, + Err(e) => { + let mut life_lock = device.lock_life(); + life_lock + .schedule_resource_destruction(queue::TempResource::Buffer(resource), !0); + life_lock.schedule_resource_destruction(queue::TempResource::Buffer(stage), !0); + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(DeviceError::from(e).into())); + } + }; - return (id, None); + assert_eq!(resource.size % wgt::COPY_BUFFER_ALIGNMENT, 0); + // Zero initialize memory and then mark both staging and buffer as initialized + // (it's guaranteed that this is the case by the time the buffer is usable) + unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, resource.size as usize) }; + resource + .initialization_status + .write() + .drain(0..resource.size); + stage.initialization_status.write().drain(0..resource.size); + + *resource.map_state.lock() = resource::BufferMapState::Init { + ptr: mapping.ptr, + needs_flush: !mapping.is_coherent, + stage_buffer: stage, + }; + hal::BufferUses::COPY_DST }; - let id = fid.assign_error(desc.label.borrow_or_default()); - (id, Some(error)) + device + .trackers + .lock() + .buffers + .insert_single(id, resource, buffer_use); + + (id, None) } /// Assign `id_in` an error with the given `label`. 
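Note: the rewritten `create_buffer` above replaces the old `loop { ... break err }` shape with early returns, and on every failure path it still consumes the reserved id, assigning it to an error entry so later calls using that id get a proper error rather than a dangling handle. A condensed sketch of that pattern with stand-in types (none of these are the real wgpu-core signatures):

```rust
// Stand-in types; the real code works with `FutureId`, hub registries and
// `resource::CreateBufferError`.
struct FutureId;
struct Desc<'a> {
    label: &'a str,
}
#[derive(Debug)]
struct Error;
type Id = u32;

impl FutureId {
    fn assign(self, _resource: Vec<u8>) -> (Id, Vec<u8>) {
        (1, Vec::new())
    }
    fn assign_error(self, _label: &str) -> Id {
        0
    }
}

fn build(desc: &Desc) -> Result<Vec<u8>, Error> {
    Ok(desc.label.as_bytes().to_vec())
}

fn create_thing(fid: FutureId, desc: &Desc) -> (Id, Option<Error>) {
    let thing = match build(desc) {
        Ok(t) => t,
        // Failure still consumes the id so the caller's handle maps to an
        // error entry rather than pointing at nothing.
        Err(e) => return (fid.assign_error(desc.label), Some(e)),
    };
    let (id, _resource) = fid.assign(thing);
    (id, None)
}
```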
@@ -455,7 +466,7 @@ impl Global { let hub = A::hub(self); if let Some(buffer) = hub.buffers.unregister(buffer_id) { - if buffer.is_unique() { + if buffer.ref_count() == 1 { buffer.destroy().ok(); } @@ -548,7 +559,9 @@ impl Global { }, }; let view = device.create_texture_view(&resource, &desc).unwrap(); - clear_views.push(Arc::new(view)); + let view_fid = hub.texture_views.request::(); + let view = view_fid.init(view); + clear_views.push(view); } } let mut clear_mode = resource.clear_mode.write(); @@ -798,6 +811,7 @@ impl Global { } } + #[allow(unused_unsafe)] pub fn texture_create_view( &self, texture_id: id::TextureId, @@ -825,7 +839,7 @@ impl Global { }); } - let view = match device.create_texture_view(&texture, desc) { + let view = match unsafe { device.create_texture_view(&texture, desc) } { Ok(view) => view, Err(e) => break e, }; diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 8559c60369..a2a2c68a56 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -11,7 +11,7 @@ use crate::{ hub::Hub, id::{self}, pipeline::{ComputePipeline, RenderPipeline}, - resource::{self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView}, + resource::{self, Buffer, QuerySet, Resource, Sampler, StagingBuffer, Texture, TextureView}, track::Tracker, SubmissionIndex, }; @@ -23,8 +23,9 @@ use thiserror::Error; use std::{collections::HashMap, sync::Arc}; /// A struct that keeps lists of resources that are no longer needed by the user. -pub(crate) struct SuspectedResources { +pub(crate) struct ResourceMaps { pub(crate) buffers: HashMap>>, + pub(crate) staging_buffers: HashMap>>, pub(crate) textures: HashMap>>, pub(crate) texture_views: HashMap>>, pub(crate) samplers: HashMap>>, @@ -37,10 +38,11 @@ pub(crate) struct SuspectedResources { pub(crate) query_sets: HashMap>>, } -impl SuspectedResources { +impl ResourceMaps { pub(crate) fn new() -> Self { Self { buffers: HashMap::new(), + staging_buffers: HashMap::new(), textures: HashMap::new(), texture_views: HashMap::new(), samplers: HashMap::new(), @@ -55,6 +57,7 @@ impl SuspectedResources { } pub(crate) fn clear(&mut self) { self.buffers.clear(); + self.staging_buffers.clear(); self.textures.clear(); self.texture_views.clear(); self.samplers.clear(); @@ -67,128 +70,18 @@ impl SuspectedResources { self.query_sets.clear(); } - pub(crate) fn extend(&mut self, other: &Self) { - other.buffers.iter().for_each(|(id, v)| { - self.buffers.insert(*id, v.clone()); - }); - other.textures.iter().for_each(|(id, v)| { - self.textures.insert(*id, v.clone()); - }); - other.texture_views.iter().for_each(|(id, v)| { - self.texture_views.insert(*id, v.clone()); - }); - other.samplers.iter().for_each(|(id, v)| { - self.samplers.insert(*id, v.clone()); - }); - other.bind_groups.iter().for_each(|(id, v)| { - self.bind_groups.insert(*id, v.clone()); - }); - other.compute_pipelines.iter().for_each(|(id, v)| { - self.compute_pipelines.insert(*id, v.clone()); - }); - other.render_pipelines.iter().for_each(|(id, v)| { - self.render_pipelines.insert(*id, v.clone()); - }); - other.bind_group_layouts.iter().for_each(|(id, v)| { - self.bind_group_layouts.insert(*id, v.clone()); - }); - other.pipeline_layouts.iter().for_each(|(id, v)| { - self.pipeline_layouts.insert(*id, v.clone()); - }); - other.render_bundles.iter().for_each(|(id, v)| { - self.render_bundles.insert(*id, v.clone()); - }); - other.query_sets.iter().for_each(|(id, v)| { - self.query_sets.insert(*id, v.clone()); - }); - } -} - -/// Raw backend resources that should be 
freed shortly. -#[derive(Debug)] -struct NonReferencedResources { - buffers: Vec>>, - textures: Vec>>, - texture_views: Vec>>, - samplers: Vec>>, - bind_groups: Vec>>, - compute_pipes: Vec>>, - render_pipes: Vec>>, - bind_group_layouts: Vec>>, - pipeline_layouts: Vec>>, - query_sets: Vec>>, -} - -impl NonReferencedResources { - fn new() -> Self { - Self { - buffers: Vec::new(), - textures: Vec::new(), - texture_views: Vec::new(), - samplers: Vec::new(), - bind_groups: Vec::new(), - compute_pipes: Vec::new(), - render_pipes: Vec::new(), - bind_group_layouts: Vec::new(), - pipeline_layouts: Vec::new(), - query_sets: Vec::new(), - } - } - - fn extend(&mut self, other: Self) { + pub(crate) fn extend(&mut self, other: Self) { self.buffers.extend(other.buffers); + self.staging_buffers.extend(other.staging_buffers); self.textures.extend(other.textures); self.texture_views.extend(other.texture_views); self.samplers.extend(other.samplers); self.bind_groups.extend(other.bind_groups); - self.compute_pipes.extend(other.compute_pipes); - self.render_pipes.extend(other.render_pipes); + self.compute_pipelines.extend(other.compute_pipelines); + self.render_pipelines.extend(other.render_pipelines); + self.bind_group_layouts.extend(other.bind_group_layouts); + self.pipeline_layouts.extend(other.pipeline_layouts); self.query_sets.extend(other.query_sets); - assert!(other.bind_group_layouts.is_empty()); - assert!(other.pipeline_layouts.is_empty()); - } - - unsafe fn clean(&mut self) { - if !self.buffers.is_empty() { - profiling::scope!("destroy_buffers"); - self.buffers.clear(); - } - if !self.textures.is_empty() { - profiling::scope!("destroy_textures"); - self.textures.clear(); - } - if !self.texture_views.is_empty() { - profiling::scope!("destroy_texture_views"); - self.texture_views.clear(); - } - if !self.samplers.is_empty() { - profiling::scope!("destroy_samplers"); - self.samplers.clear(); - } - if !self.bind_groups.is_empty() { - profiling::scope!("destroy_bind_groups"); - self.bind_groups.clear(); - } - if !self.compute_pipes.is_empty() { - profiling::scope!("destroy_compute_pipelines"); - self.compute_pipes.clear(); - } - if !self.render_pipes.is_empty() { - profiling::scope!("destroy_render_pipelines"); - self.render_pipes.clear(); - } - if !self.bind_group_layouts.is_empty() { - profiling::scope!("destroy_bind_group_layouts"); - self.bind_group_layouts.clear(); - } - if !self.pipeline_layouts.is_empty() { - profiling::scope!("destroy_pipeline_layouts"); - self.pipeline_layouts.clear(); - } - if !self.query_sets.is_empty() { - profiling::scope!("destroy_query_sets"); - self.query_sets.clear(); - } } } @@ -210,7 +103,7 @@ struct ActiveSubmission { /// This includes things like temporary resources and resources that are /// used by submitted commands but have been dropped by the user (meaning that /// this submission is their last reference.) - last_resources: NonReferencedResources, + last_resources: ResourceMaps, /// Buffers to be mapped once this submission has completed. mapped: Vec>>, @@ -284,7 +177,7 @@ pub(crate) struct LifetimeTracker { /// Resources whose user handle has died (i.e. drop/destroy has been called) /// and will likely be ready for destruction soon. - pub suspected_resources: SuspectedResources, + pub suspected_resources: ResourceMaps, /// Resources used by queue submissions still in flight. One entry per /// submission, with older submissions appearing before younger. 
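For orientation, the idea behind `ResourceMaps` can be sketched outside of wgpu-core: each resource class becomes a map keyed by its id, so suspecting the same resource twice collapses into a single entry, and merging two trackers is a plain map merge rather than the per-field copying the old `SuspectedResources::extend` performed. The sketch below uses simplified stand-in types (`u64` ids, `Arc<String>` payloads) rather than the real wgpu-core definitions; it only illustrates the pattern.

    // Minimal sketch of the id-keyed ResourceMaps pattern introduced above;
    // the types are placeholders, not the real wgpu-core resources.
    use std::{collections::HashMap, sync::Arc};

    type BufferId = u64;
    type TextureId = u64;

    #[derive(Default)]
    struct ResourceMapsSketch {
        buffers: HashMap<BufferId, Arc<String>>,   // Arc<Buffer<A>> in wgpu-core
        textures: HashMap<TextureId, Arc<String>>, // Arc<Texture<A>> in wgpu-core
    }

    impl ResourceMapsSketch {
        // Suspecting the same resource twice keeps a single entry per id,
        // unlike the old Vec-based lists, which accumulated duplicate Arcs.
        fn add_buffer(&mut self, id: BufferId, buf: Arc<String>) {
            self.buffers.insert(id, buf);
        }

        // Merging two trackers is a plain map merge.
        fn extend(&mut self, other: Self) {
            self.buffers.extend(other.buffers);
            self.textures.extend(other.textures);
        }
    }

    fn main() {
        let mut per_submit = ResourceMapsSketch::default();
        let buf = Arc::new(String::from("buffer"));
        per_submit.add_buffer(7, buf.clone());
        per_submit.add_buffer(7, buf); // collapses into the same entry
        per_submit.textures.insert(3, Arc::new(String::from("texture")));

        let mut device_wide = ResourceMapsSketch::default();
        device_wide.extend(per_submit);
        assert_eq!(device_wide.buffers.len(), 1);
        assert_eq!(device_wide.textures.len(), 1);
    }
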
@@ -299,7 +192,7 @@ pub(crate) struct LifetimeTracker { /// These are freed by `LifeTracker::cleanup`, which is called from periodic /// maintenance functions like `Global::device_poll`, and when a device is /// destroyed. - free_resources: NonReferencedResources, + free_resources: ResourceMaps, /// Buffers the user has asked us to map, and which are not used by any /// queue submission still in flight. @@ -318,9 +211,9 @@ impl LifetimeTracker { mapped: Vec::new(), future_suspected_buffers: Vec::new(), future_suspected_textures: Vec::new(), - suspected_resources: SuspectedResources::new(), + suspected_resources: ResourceMaps::new(), active: Vec::new(), - free_resources: NonReferencedResources::new(), + free_resources: ResourceMaps::new(), ready_to_map: Vec::new(), work_done_closures: SmallVec::new(), } @@ -338,13 +231,22 @@ impl LifetimeTracker { temp_resources: impl Iterator>, encoders: Vec>, ) { - let mut last_resources = NonReferencedResources::new(); + let mut last_resources = ResourceMaps::new(); for res in temp_resources { match res { - TempResource::Buffer(raw) => last_resources.buffers.push(raw), + TempResource::Buffer(raw) => { + last_resources.buffers.insert(raw.as_info().id(), raw); + } + TempResource::StagingBuffer(raw) => { + last_resources + .staging_buffers + .insert(raw.as_info().id(), raw); + } TempResource::Texture(raw, views) => { - last_resources.textures.push(raw); - last_resources.texture_views.extend(views); + last_resources.textures.insert(raw.as_info().id(), raw); + views.into_iter().for_each(|v| { + last_resources.texture_views.insert(v.as_info().id(), v); + }); } } } @@ -426,9 +328,7 @@ impl LifetimeTracker { pub fn cleanup(&mut self) { profiling::scope!("LifetimeTracker::cleanup"); - unsafe { - self.free_resources.clean(); - } + self.free_resources.clear(); } pub fn schedule_resource_destruction( @@ -442,10 +342,17 @@ impl LifetimeTracker { .find(|a| a.index == last_submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); match temp_resource { - TempResource::Buffer(raw) => resources.buffers.push(raw), + TempResource::Buffer(raw) => { + resources.buffers.insert(raw.as_info().id(), raw); + } + TempResource::StagingBuffer(raw) => { + resources.staging_buffers.insert(raw.as_info().id(), raw); + } TempResource::Texture(raw, views) => { - resources.texture_views.extend(views); - resources.textures.push(raw); + views.into_iter().for_each(|v| { + resources.texture_views.insert(v.as_info().id(), v); + }); + resources.textures.insert(raw.as_info().id(), raw); } } } @@ -575,7 +482,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .bind_groups - .push(bind_group.clone()); + .insert(bind_group_id, bind_group.clone()); } !is_removed }); @@ -619,7 +526,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .texture_views - .push(view.clone()); + .insert(view_id, view.clone()); } !is_removed }); @@ -659,11 +566,15 @@ impl LifetimeTracker { ref clear_views, .. 
} = &*texture.clear_mode.read() { - non_referenced_resources - .texture_views - .extend(clear_views.iter().cloned()); + clear_views.into_iter().for_each(|v| { + non_referenced_resources + .texture_views + .insert(v.as_info().id(), v.clone()); + }); } - non_referenced_resources.textures.push(texture.clone()); + non_referenced_resources + .textures + .insert(texture_id, texture.clone()); } !is_removed }); @@ -702,7 +613,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .samplers - .push(sampler.clone()); + .insert(sampler_id, sampler.clone()); } !is_removed }); @@ -740,14 +651,16 @@ impl LifetimeTracker { ref stage_buffer, .. } = *buffer.map_state.lock() { - self.free_resources.buffers.push(stage_buffer.clone()); + self.free_resources + .buffers + .insert(stage_buffer.as_info().id(), stage_buffer.clone()); } self.active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .buffers - .push(buffer.clone()); + .insert(buffer_id, buffer.clone()); } !is_removed }); @@ -793,8 +706,8 @@ impl LifetimeTracker { .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .compute_pipes - .push(compute_pipeline.clone()); + .compute_pipelines + .insert(compute_pipeline_id, compute_pipeline.clone()); } !is_removed }, @@ -842,8 +755,8 @@ impl LifetimeTracker { .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .render_pipes - .push(render_pipeline.clone()); + .render_pipelines + .insert(render_pipeline_id, render_pipeline.clone()); } !is_removed }); @@ -871,12 +784,12 @@ impl LifetimeTracker { .find(|a| a.index == *submit_index) .map_or(&self.free_resources, |a| &a.last_resources); - resources.compute_pipes.iter().for_each(|p| { + resources.compute_pipelines.iter().for_each(|(_id, p)| { if p.layout.as_info().id() == *pipeline_layout_id { num_ref_in_nonreferenced_resources += 1; } }); - resources.render_pipes.iter().for_each(|p| { + resources.render_pipelines.iter().for_each(|(_id, p)| { if p.layout.as_info().id() == *pipeline_layout_id { num_ref_in_nonreferenced_resources += 1; } @@ -898,7 +811,7 @@ impl LifetimeTracker { } self.free_resources .pipeline_layouts - .push(pipeline_layout.clone()); + .insert(*pipeline_layout_id, pipeline_layout.clone()); return false; } else { @@ -936,13 +849,13 @@ impl LifetimeTracker { .find(|a| a.index == *submit_index) .map_or(&self.free_resources, |a| &a.last_resources); - resources.bind_groups.iter().for_each(|b| { + resources.bind_groups.iter().for_each(|(_id, b)| { if b.layout.as_info().id() == *bind_group_layout_id { num_ref_in_nonreferenced_resources += 1; } }); - resources.bind_group_layouts.iter().for_each(|b| { - if b.as_info().id() == *bind_group_layout_id { + resources.bind_group_layouts.iter().for_each(|(id, _b)| { + if id == bind_group_layout_id { num_ref_in_nonreferenced_resources += 1; } }); @@ -954,21 +867,21 @@ impl LifetimeTracker { .find(|a| a.index == *submit_index) .map_or(&self.free_resources, |a| &a.last_resources); - resources.compute_pipes.iter().for_each(|p| { + resources.compute_pipelines.iter().for_each(|(_id, p)| { p.layout.bind_group_layouts.iter().for_each(|b| { if b.as_info().id() == *bind_group_layout_id { num_ref_in_nonreferenced_resources += 1; } }); }); - resources.render_pipes.iter().for_each(|p| { + resources.render_pipelines.iter().for_each(|(_id, p)| { p.layout.bind_group_layouts.iter().for_each(|b| 
{ if b.as_info().id() == *bind_group_layout_id { num_ref_in_nonreferenced_resources += 1; } }); }); - resources.pipeline_layouts.iter().for_each(|p| { + resources.pipeline_layouts.iter().for_each(|(_id, p)| { p.bind_group_layouts.iter().for_each(|b| { if b.as_info().id() == *bind_group_layout_id { num_ref_in_nonreferenced_resources += 1; @@ -991,7 +904,7 @@ impl LifetimeTracker { self.free_resources .bind_group_layouts - .push(bind_group_layout.clone()); + .insert(*bind_group_layout_id, bind_group_layout.clone()); return false; } else { @@ -1036,7 +949,7 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources) .query_sets - .push(query_set.clone()); + .insert(query_set_id, query_set.clone()); } !is_removed }); @@ -1217,7 +1130,9 @@ impl LifetimeTracker { if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; log::info!("Buffer {:?} is not tracked anymore", buffer_id); - self.free_resources.buffers.push(buffer.clone()); + self.free_resources + .buffers + .insert(buffer_id, buffer.clone()); } else { let mapping = match std::mem::replace( &mut *buffer.map_state.lock(), diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index e450cbe090..8f103a9ae6 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -6,13 +6,13 @@ use crate::{ ClearError, CommandBuffer, CopySide, ImageCopyTexture, TransferError, }, conv, - device::{DeviceError, WaitIdleError}, + device::{life::ResourceMaps, DeviceError, WaitIdleError}, get_lowest_common_denom, global::Global, hal_api::HalApi, id::{self, QueueId}, identity::{GlobalIdentityHandlerFactory, Input}, - init_tracker::{has_copy_partial_init_tracker_coverage, BufferInitTracker, TextureInitRange}, + init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, resource::{ Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, StagingBuffer, Texture, TextureInner, TextureView, @@ -21,7 +21,7 @@ use crate::{ }; use hal::{CommandEncoder as _, Device as _, Queue as _}; -use parking_lot::{Mutex, RwLock}; +use parking_lot::Mutex; use smallvec::SmallVec; use std::{ iter, mem, ptr, @@ -160,6 +160,7 @@ pub struct WrappedSubmissionIndex { #[derive(Debug)] pub enum TempResource { Buffer(Arc>), + StagingBuffer(Arc>), Texture(Arc>, SmallVec<[Arc>; 1]>), } @@ -235,23 +236,9 @@ impl PendingWrites { self.temp_resources.push(resource); } - fn consume(&mut self, device: &Arc>, buffer: Arc>) { + fn consume(&mut self, buffer: Arc>) { self.temp_resources - .push(TempResource::Buffer(Arc::new(Buffer:: { - raw: buffer.raw.lock().take(), - device: device.clone(), - usage: wgt::BufferUsages::empty(), - size: buffer.size, - initialization_status: RwLock::new(BufferInitTracker::new(buffer.size)), - sync_mapped_writes: Mutex::new(None), - map_state: Mutex::new(crate::resource::BufferMapState::Idle), - info: ResourceInfo::new( - #[cfg(debug_assertions)] - &buffer.info.label, - #[cfg(not(debug_assertions))] - "", - ), - }))); + .push(TempResource::StagingBuffer(buffer)); } #[must_use] @@ -431,12 +418,15 @@ impl Global { let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); + let stage_fid = hub.staging_buffers.request::(); + let staging_buffer = stage_fid.init(staging_buffer); + if let Err(flush_error) = unsafe { profiling::scope!("copy"); ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); staging_buffer.flush(device.raw()) } { - pending_writes.consume(device, 
Arc::new(staging_buffer)); + pending_writes.consume(staging_buffer); return Err(flush_error.into()); } @@ -448,7 +438,7 @@ impl Global { buffer_offset, ); - pending_writes.consume(device, Arc::new(staging_buffer)); + pending_writes.consume(staging_buffer); result } @@ -510,7 +500,7 @@ impl Global { // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } { - pending_writes.consume(device, staging_buffer); + pending_writes.consume(staging_buffer); return Err(flush_error.into()); } @@ -522,7 +512,7 @@ impl Global { buffer_offset, ); - pending_writes.consume(device, staging_buffer); + pending_writes.consume(staging_buffer); result } @@ -820,6 +810,9 @@ impl Global { // `device.pending_writes.consume`. let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(device, stage_size)?; + let stage_fid = hub.staging_buffers.request::(); + let staging_buffer = stage_fid.init(staging_buffer); + if stage_bytes_per_row == bytes_per_row { profiling::scope!("copy aligned"); // Fast path if the data is already being aligned optimally. @@ -854,7 +847,7 @@ impl Global { } if let Err(e) = unsafe { staging_buffer.flush(device.raw()) } { - pending_writes.consume(device, Arc::new(staging_buffer)); + pending_writes.consume(staging_buffer); return Err(e.into()); } @@ -893,7 +886,7 @@ impl Global { } } - pending_writes.consume(device, Arc::new(staging_buffer)); + pending_writes.consume(staging_buffer); pending_writes .dst_textures .insert(destination.texture, dst.clone()); @@ -1119,8 +1112,6 @@ impl Global { let mut fence = device.fence.write(); let fence = fence.as_mut().unwrap(); - device.temp_suspected.lock().clear(); - let submit_index = device .active_submission_index .fetch_add(1, Ordering::Relaxed) @@ -1137,6 +1128,12 @@ impl Global { //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. 
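The next hunk switches `device.temp_suspected` to a `Mutex<Option<ResourceMaps<A>>>` so the whole map can be taken out of the lock, consumed, and replaced with a fresh one, instead of being cleared in place while other code still holds a reference to the guard. A rough, self-contained sketch of that take-and-replace pattern follows; a plain `HashMap` stands in for `ResourceMaps`, and std's `Mutex` for the `parking_lot` lock wgpu-core actually uses, so this is not the real implementation.

    // Sketch of the take-and-replace pattern used for `temp_suspected`.
    use std::{collections::HashMap, sync::Mutex};

    // Stand-in for ResourceMaps<A>; only the Option dance matters here.
    type Suspected = HashMap<u64, &'static str>;

    struct DeviceSketch {
        // wgpu-core uses parking_lot::Mutex; std's Mutex only adds the
        // poisoning unwrap on lock().
        temp_suspected: Mutex<Option<Suspected>>,
    }

    impl DeviceSketch {
        fn new() -> Self {
            Self {
                temp_suspected: Mutex::new(Some(Suspected::new())),
            }
        }

        // Move the accumulated map out wholesale (no per-entry copying) and
        // leave a fresh empty map behind for the next submission.
        fn flush_suspected(&self) -> Suspected {
            let mut guard = self.temp_suspected.lock().unwrap();
            let suspected = guard.take().unwrap();
            guard.replace(Suspected::new());
            suspected
        }
    }

    fn main() {
        let device = DeviceSketch::new();
        device
            .temp_suspected
            .lock()
            .unwrap()
            .as_mut()
            .unwrap()
            .insert(1, "buffer");
        let moved = device.flush_suspected();
        assert_eq!(moved.len(), 1);
        assert!(device.temp_suspected.lock().unwrap().as_ref().unwrap().is_empty());
    }
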
+ let mut temp_suspected = device.temp_suspected.lock(); + { + let mut suspected = temp_suspected.take().unwrap(); + suspected.clear(); + temp_suspected.replace(ResourceMaps::new()); + } // finish all the command buffers first for &cmb_id in command_buffer_ids { @@ -1200,9 +1197,9 @@ impl Global { unsafe { device.raw().unmap_buffer(raw_buf) } .map_err(DeviceError::from)?; } - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .buffers .insert(id, buffer.clone()); } else { @@ -1226,9 +1223,9 @@ impl Global { }; texture.info.use_at(submit_index); if texture.is_unique() { - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .textures .insert(id, texture.clone()); } @@ -1243,9 +1240,9 @@ impl Global { for texture_view in cmd_buf_trackers.views.used_resources() { texture_view.info.use_at(submit_index); if texture_view.is_unique() { - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .texture_views .insert(texture_view.as_info().id(), texture_view.clone()); } @@ -1263,9 +1260,9 @@ impl Global { sampler.info.use_at(submit_index); } if bg.is_unique() { - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .bind_groups .insert(bg.as_info().id(), bg.clone()); } @@ -1277,7 +1274,7 @@ impl Global { { compute_pipeline.info.use_at(submit_index); if compute_pipeline.is_unique() { - device.temp_suspected.lock().compute_pipelines.insert( + temp_suspected.as_mut().unwrap().compute_pipelines.insert( compute_pipeline.as_info().id(), compute_pipeline.clone(), ); @@ -1288,7 +1285,7 @@ impl Global { { render_pipeline.info.use_at(submit_index); if render_pipeline.is_unique() { - device.temp_suspected.lock().render_pipelines.insert( + temp_suspected.as_mut().unwrap().render_pipelines.insert( render_pipeline.as_info().id(), render_pipeline.clone(), ); @@ -1297,9 +1294,9 @@ impl Global { for query_set in cmd_buf_trackers.query_sets.used_resources() { query_set.info.use_at(submit_index); if query_set.is_unique() { - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .query_sets .insert(query_set.as_info().id(), query_set.clone()); } @@ -1317,9 +1314,9 @@ impl Global { query_set.info.use_at(submit_index); } if bundle.is_unique() { - device - .temp_suspected - .lock() + temp_suspected + .as_mut() + .unwrap() .render_bundles .insert(bundle.as_info().id(), bundle.clone()); } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index c95c7a8274..105d8c7f62 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -51,7 +51,7 @@ use std::{ }; use super::{ - life::{self, SuspectedResources}, + life::{self, ResourceMaps}, queue::{self}, DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE, IMPLICIT_FAILURE, ZERO_BUFFER_SIZE, @@ -104,7 +104,7 @@ pub struct Device { life_tracker: Mutex>, /// Temporary storage for resource management functions. Cleared at the end /// of every call (unless an error occurs). 
- pub(crate) temp_suspected: Mutex>, + pub(crate) temp_suspected: Mutex>>, pub(crate) alignments: hal::Alignments, pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, @@ -240,7 +240,7 @@ impl Device { fence: RwLock::new(Some(fence)), trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), - temp_suspected: Mutex::new(life::SuspectedResources::new()), + temp_suspected: Mutex::new(Some(life::ResourceMaps::new())), #[cfg(feature = "trace")] trace: Mutex::new(trace_path.and_then(|path| match trace::Trace::new(path) { Ok(mut trace) => { @@ -299,10 +299,11 @@ impl Device { // call, and cleared by the end. But `Global::queue_submit` is // fallible; if it exits early, it may leave some resources in // `temp_suspected`. - life_tracker - .suspected_resources - .extend(&self.temp_suspected.lock()); - self.temp_suspected.lock().clear(); + { + let temp_suspected = self.temp_suspected.lock().take().unwrap(); + life_tracker.suspected_resources.extend(temp_suspected); + } + self.temp_suspected.lock().replace(ResourceMaps::new()); life_tracker.triage_suspected( hub, @@ -356,7 +357,7 @@ impl Device { } pub(crate) fn untrack(&self, trackers: &Tracker) { - let mut temp_suspected = self.temp_suspected.lock(); + let mut temp_suspected = self.temp_suspected.lock().take().unwrap(); temp_suspected.clear(); // As the tracker is cleared/dropped, we need to consider all the resources // that it references for destruction in the next GC pass. @@ -418,10 +419,8 @@ impl Device { } } } - - self.lock_life().suspected_resources.extend(&temp_suspected); - - temp_suspected.clear(); + self.lock_life().suspected_resources.extend(temp_suspected); + self.temp_suspected.lock().replace(ResourceMaps::new()); } pub(crate) fn create_buffer( diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index e5a8b79665..fa7d057dd5 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -1,8 +1,11 @@ use parking_lot::Mutex; use wgt::Backend; -use crate::{id, Epoch, Index}; -use std::{fmt::Debug, marker::PhantomData, sync::Arc}; +use crate::{ + id::{self}, + Epoch, Index, +}; +use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; /// A simple structure to allocate [`Id`] identifiers. /// @@ -33,19 +36,9 @@ use std::{fmt::Debug, marker::PhantomData, sync::Arc}; /// [`free`]: IdentityManager::free #[derive(Debug, Default)] pub(super) struct IdentityValues { - /// Available index values. If empty, then `epochs.len()` is the next index - /// to allocate. - free: Vec, - - /// The next or currently-live epoch value associated with each `Id` index. - /// - /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any - /// id with the same index but an older epoch is dead. - /// - /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its - /// next `Id`. 
- epochs: Vec, - + free: Vec<(Index, Epoch)>, + //sorted by Index + used: HashMap>, count: usize, } @@ -57,28 +50,34 @@ impl IdentityValues { pub fn alloc(&mut self, backend: Backend) -> I { self.count += 1; match self.free.pop() { - Some(index) => I::zip(index, self.epochs[index as usize], backend), + Some((index, epoch)) => I::zip(index, epoch + 1, backend), None => { let epoch = 1; - let id = I::zip(self.epochs.len() as Index, epoch, backend); - self.epochs.push(epoch); - id + let used = self.used.entry(epoch).or_default(); + let index = if let Some(i) = used.iter().max_by_key(|v| *v) { + i + 1 + } else { + 0 + }; + used.push(index); + I::zip(index, epoch, backend) } } } + pub fn mark_as_used(&mut self, id: I) -> I { + self.count += 1; + let (index, epoch, _backend) = id.unzip(); + let used = self.used.entry(epoch).or_default(); + used.push(index); + id + } + /// Free `id`. It will never be returned from `alloc` again. pub fn release(&mut self, id: I) { let (index, epoch, _backend) = id.unzip(); - let pe = &mut self.epochs[index as usize]; - assert_eq!(*pe, epoch); - // If the epoch reaches EOL, the index doesn't go - // into the free list, will never be reused again. - if epoch < id::EPOCH_MASK { - *pe = epoch + 1; - self.free.push(index); - self.count -= 1; - } + self.free.push((index, epoch)); + self.count -= 1; } pub fn count(&self) -> usize { @@ -96,6 +95,9 @@ impl IdentityManager { pub fn process(&self, backend: Backend) -> I { self.values.lock().alloc(backend) } + pub fn mark_as_used(&self, id: I) -> I { + self.values.lock().mark_as_used(id) + } pub fn free(&self, id: I) { self.values.lock().release(id) } @@ -121,8 +123,10 @@ pub trait IdentityHandlerFactory { /// and are not generated by wgpu /// /// [`IdentityManager`]: IdentityManager - fn spawn(&self) -> Option>>; - + fn spawn(&self) -> Arc> { + Arc::new(IdentityManager::new()) + } + fn autogenerate_ids() -> bool; fn input_to_id(id_in: Self::Input) -> I; } @@ -136,9 +140,8 @@ pub struct IdentityManagerFactory; impl IdentityHandlerFactory for IdentityManagerFactory { type Input = (); - - fn spawn(&self) -> Option>> { - Some(Arc::new(IdentityManager::new())) + fn autogenerate_ids() -> bool { + true } fn input_to_id(_id_in: Self::Input) -> I { @@ -177,12 +180,13 @@ pub type Input = >::Input; fn test_epoch_end_of_life() { use id::TypedId as _; let man = IdentityManager::::new(); - man.values.lock().epochs.push(id::EPOCH_MASK); - man.values.lock().free.push(0); - let id1 = man.values.lock().alloc::(Backend::Empty); - assert_eq!(id1.unzip().0, 0); - man.values.lock().release(id1); - let id2 = man.values.lock().alloc::(Backend::Empty); - // confirm that the index 0 is no longer re-used + let forced_id = man.mark_as_used(id::BufferId::zip(0, 1, Backend::Empty)); + assert_eq!(forced_id.unzip().0, 0); + let id1 = man.process(Backend::Empty); + assert_eq!(id1.unzip().0, 1); + man.free(id1); + let id2 = man.process(Backend::Empty); + // confirm that the epoch 1 is no longer re-used assert_eq!(id2.unzip().0, 1); + assert_eq!(id2.unzip().1, 2); } diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 770738977c..310c6814a0 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -39,7 +39,7 @@ impl RegistryReport { /// #[derive(Debug)] pub struct Registry> { - identity: Option>>, + identity: Arc>, storage: RwLock>, backend: Backend, } @@ -61,7 +61,7 @@ impl> Registry { #[must_use] pub(crate) struct FutureId<'a, I: id::TypedId, T: Resource> { id: I, - identity: Option>>, + identity: Arc>, data: &'a 
RwLock>, } @@ -75,9 +75,13 @@ impl> FutureId<'_, I, T> { self.id } - pub fn assign(self, mut value: T) -> (I, Arc) { + pub fn init(&self, mut value: T) -> Arc { value.as_info_mut().set_id(self.id, &self.identity); - self.data.write().insert(self.id, Arc::new(value)); + Arc::new(value) + } + + pub fn assign(self, value: T) -> (I, Arc) { + self.data.write().insert(self.id, self.init(value)); (self.id, self.data.read().get(self.id).unwrap().clone()) } @@ -100,14 +104,25 @@ impl> Registry { F: IdentityHandlerFactory, { FutureId { - id: match self.identity.as_ref() { - Some(identity) => identity.process(self.backend), - _ => F::input_to_id(id_in), + id: if F::autogenerate_ids() { + self.identity.process(self.backend) + } else { + self.identity.mark_as_used(F::input_to_id(id_in)) }, identity: self.identity.clone(), data: &self.storage, } } + pub(crate) fn request(&self) -> FutureId + where + F: IdentityHandlerFactory, + { + FutureId { + id: self.identity.process(self.backend), + identity: self.identity.clone(), + data: &self.storage, + } + } pub(crate) fn contains(&self, id: I) -> bool { self.read().contains(id) } @@ -131,6 +146,11 @@ impl> Registry { value.as_info_mut().set_id(id, &self.identity); storage.force_replace(id, value) } + pub fn force_replace_with_error(&self, id: I, label: &str) { + let mut storage = self.storage.write(); + storage.remove(id); + storage.insert_error(id, label); + } pub(crate) fn unregister(&self, id: I) -> Option> { let value = self.storage.write().remove(id); //Returning None is legal if it's an error ID @@ -164,9 +184,7 @@ impl> Registry { element_size: std::mem::size_of::(), ..Default::default() }; - if let Some(identity) = self.identity.as_ref() { - report.num_allocated = identity.values.lock().count(); - } + report.num_allocated = self.identity.values.lock().count(); for element in storage.map.iter() { match *element { Element::Occupied(..) 
=> report.num_kept_from_user += 1, diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 82dd9600b5..bb28ce805e 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -115,9 +115,9 @@ impl ResourceInfo { self.id.unwrap() } - pub(crate) fn set_id(&mut self, id: Id, identity: &Option>>) { + pub(crate) fn set_id(&mut self, id: Id, identity: &Arc>) { self.id = Some(id); - self.identity = identity.clone(); + self.identity = Some(identity.clone()); } /// Record that this resource will be used by the queue submission with the From 88dc35c624e3b66df1258c91772021830f9f07e9 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 18 Sep 2023 19:43:21 +0200 Subject: [PATCH 103/132] Fixing surface present issue --- deno_webgpu/buffer.rs | 2 +- player/tests/test.rs | 4 +- tests/tests/mem_leaks.rs | 1 - wgpu-core/src/command/clear.rs | 6 +- wgpu-core/src/command/mod.rs | 6 +- wgpu-core/src/command/transfer.rs | 25 +- wgpu-core/src/device/global.rs | 15 +- wgpu-core/src/device/life.rs | 659 +++++++++++------------------- wgpu-core/src/device/mod.rs | 6 +- wgpu-core/src/device/queue.rs | 25 +- wgpu-core/src/device/resource.rs | 8 +- wgpu-core/src/present.rs | 92 ++--- wgpu-core/src/resource.rs | 71 ++-- wgpu-core/src/track/buffer.rs | 1 + wgpu-core/src/track/mod.rs | 11 +- wgpu-core/src/track/stateless.rs | 1 + wgpu-core/src/track/texture.rs | 33 +- wgpu/src/backend/direct.rs | 10 +- 18 files changed, 410 insertions(+), 566 deletions(-) diff --git a/deno_webgpu/buffer.rs b/deno_webgpu/buffer.rs index 7c5f9d58c2..92a88716ae 100644 --- a/deno_webgpu/buffer.rs +++ b/deno_webgpu/buffer.rs @@ -106,7 +106,7 @@ pub async fn op_webgpu_buffer_get_map_async( 2 => wgpu_core::device::HostMap::Write, _ => unreachable!(), }, - callback: wgpu_core::resource::BufferMapCallback::from_rust(callback), + callback: Some(wgpu_core::resource::BufferMapCallback::from_rust(callback)), } )) .err(); diff --git a/player/tests/test.rs b/player/tests/test.rs index e3a2a6a796..a9b93618ac 100644 --- a/player/tests/test.rs +++ b/player/tests/test.rs @@ -113,9 +113,9 @@ impl Test<'_> { expect.offset .. 
expect.offset+expect.data.len() as wgt::BufferAddress, wgc::resource::BufferMapOperation { host: wgc::device::HostMap::Read, - callback: wgc::resource::BufferMapCallback::from_rust( + callback: Some(wgc::resource::BufferMapCallback::from_rust( Box::new(map_callback) - ), + )), } )) .unwrap(); diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index dd1b93e7af..d55e51fbe4 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -251,7 +251,6 @@ fn draw_test_with_reports( assert_eq!(report.bind_groups.num_allocated, 0); assert_eq!(report.bind_group_layouts.num_allocated, 0); assert_eq!(report.pipeline_layouts.num_allocated, 0); - assert_eq!(report.buffers.num_allocated, 0); //surface is still there assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.textures.num_allocated, 1); diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index 26221d19f3..a2e1bba11d 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -246,8 +246,8 @@ pub(crate) fn clear_texture( alignments: &hal::Alignments, zero_buffer: &A::Buffer, ) -> Result<(), ClearError> { - let dst_raw = dst_texture - .inner + let dst_inner = dst_texture.inner(); + let dst_raw = dst_inner .as_ref() .unwrap() .as_raw() @@ -290,7 +290,7 @@ pub(crate) fn clear_texture( let dst_barrier = texture_tracker .set_single(dst_texture, selector, clear_usage) .unwrap() - .map(|pending| pending.into_hal(dst_texture)); + .map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())); unsafe { encoder.transition_textures(dst_barrier.into_iter()); } diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 9dc8122a03..672e81bb90 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -217,7 +217,11 @@ impl CommandBuffer { profiling::scope!("drain_barriers"); let buffer_barriers = base.buffers.drain_transitions(); - let texture_barriers = base.textures.drain_transitions(); + let (transitions, textures) = base.textures.drain_transitions(); + let texture_barriers = transitions + .into_iter() + .enumerate() + .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); unsafe { raw.transition_buffers(buffer_barriers); diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 4984e3bebf..98b96e2dda 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -801,8 +801,8 @@ impl Global { .textures .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let dst_raw = dst_texture - .inner + let dst_inner = dst_texture.inner(); + let dst_raw = dst_inner .as_ref() .unwrap() .as_raw() @@ -812,7 +812,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())); if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -932,8 +932,8 @@ impl Global { .textures .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; - let src_raw = src_texture - .inner + let src_inner = src_texture.inner(); + let src_raw = src_inner .as_ref() .unwrap() .as_raw() @@ -954,7 +954,7 @@ impl Global { } .into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture)); + let src_barrier = 
src_pending.map(|pending| pending.into_hal(src_inner.as_ref().unwrap())); let (dst_buffer, dst_pending) = { let buffer_guard = hub.buffers.read(); @@ -1075,9 +1075,11 @@ impl Global { let src_texture = texture_guard .get(source.texture) .map_err(|_| TransferError::InvalidTexture(source.texture))?; + let src_inner = src_texture.inner(); let dst_texture = texture_guard .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(source.texture))?; + let dst_inner = dst_texture.inner(); // src and dst texture format must be copy-compatible // https://gpuweb.github.io/gpuweb/#copy-compatible @@ -1139,8 +1141,7 @@ impl Global { .textures .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; - let src_raw = src_texture - .inner + let src_raw = src_inner .as_ref() .unwrap() .as_raw() @@ -1152,7 +1153,7 @@ impl Global { //TODO: try to avoid this the collection. It's needed because both // `src_pending` and `dst_pending` try to hold `trackers.textures` mutably. let mut barriers: ArrayVec<_, 2> = src_pending - .map(|pending| pending.into_hal(src_texture)) + .map(|pending| pending.into_hal(src_inner.as_ref().unwrap())) .collect(); let dst_pending = cmd_buf_data @@ -1160,8 +1161,7 @@ impl Global { .textures .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let dst_raw = dst_texture - .inner + let dst_raw = dst_inner .as_ref() .unwrap() .as_raw() @@ -1172,7 +1172,7 @@ impl Global { ); } - barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture))); + barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap()))); let hal_copy_size = hal::CopyExtent { width: src_copy_size.width.min(dst_copy_size.width), @@ -1200,6 +1200,7 @@ impl Global { regions, ); } + Ok(()) } } diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 39b351ae9b..08a31ea685 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -748,7 +748,7 @@ impl Global { resource::TextureClearMode::None => SmallVec::new(), }; - match *texture.inner.as_ref().unwrap() { + match *texture.inner().as_ref().unwrap() { resource::TextureInner::Native { ref raw } => { if !raw.is_none() { let temp = queue::TempResource::Texture(texture.clone(), clear_views); @@ -2186,9 +2186,10 @@ impl Global { // User callbacks must not be called while holding buffer_map_async_inner's locks, so we // defer the error callback if it needs to be called immediately (typically when running // into errors). - if let Err((op, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { - op.callback.call(Err(err.clone())); - + if let Err((mut operation, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { + if let Some(callback) = operation.callback.take() { + callback.call(Err(err.clone())); + } return Err(err); } @@ -2370,8 +2371,10 @@ impl Global { } // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = closure? { - operation.callback.call(status); + if let Some((mut operation, status)) = closure? 
{ + if let Some(callback) = operation.callback.take() { + callback.call(status); + } } Ok(()) } diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index a2a2c68a56..22f0b2a5da 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -384,41 +384,37 @@ impl LifetimeTracker { self.suspected_resources .render_bundles .retain(|&bundle_id, bundle| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .bundles - .remove_abandoned(bundle_id, hub.render_bundles.contains(bundle_id)) - }; - if is_removed { - log::info!("Bundle {:?} is not tracked anymore", bundle_id); - f(&bundle_id); + let mut trackers = trackers.lock(); + let is_removed = trackers + .bundles + .remove_abandoned(bundle_id, hub.render_bundles.contains(bundle_id)); - for v in bundle.used.buffers.used_resources() { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.textures.used_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.bind_groups.used_resources() { - self.suspected_resources - .bind_groups - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.render_pipelines.used_resources() { - self.suspected_resources - .render_pipelines - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.query_sets.used_resources() { - self.suspected_resources - .query_sets - .insert(v.as_info().id(), v.clone()); - } + f(&bundle_id); + + for v in bundle.used.buffers.used_resources() { + self.suspected_resources + .buffers + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.bind_groups.used_resources() { + self.suspected_resources + .bind_groups + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.render_pipelines.used_resources() { + self.suspected_resources + .render_pipelines + .insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.query_sets.used_resources() { + self.suspected_resources + .query_sets + .insert(v.as_info().id(), v.clone()); } !is_removed }); @@ -438,52 +434,48 @@ impl LifetimeTracker { self.suspected_resources .bind_groups .retain(|&bind_group_id, bind_group| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .bind_groups - .remove_abandoned(bind_group_id, hub.bind_groups.contains(bind_group_id)) - }; - if is_removed { - log::info!("BindGroup {:?} is not tracked anymore", bind_group_id); - f(&bind_group_id); + let mut trackers = trackers.lock(); + let is_removed = trackers + .bind_groups + .remove_abandoned(bind_group_id, hub.bind_groups.contains(bind_group_id)); - for v in bind_group.used.buffers.used_resources() { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v.clone()); - } - for v in bind_group.used.textures.used_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in bind_group.used.views.used_resources() { - self.suspected_resources - .texture_views - .insert(v.as_info().id(), v.clone()); - } - for v in bind_group.used.samplers.used_resources() { - self.suspected_resources - .samplers - .insert(v.as_info().id(), v.clone()); - } + f(&bind_group_id); + for v in bind_group.used.buffers.used_resources() { self.suspected_resources - .bind_group_layouts - .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + .buffers + 
.insert(v.as_info().id(), v.clone()); + } + for v in bind_group.used.textures.used_resources() { + self.suspected_resources + .textures + .insert(v.as_info().id(), v.clone()); + } + for v in bind_group.used.views.used_resources() { + self.suspected_resources + .texture_views + .insert(v.as_info().id(), v.clone()); + } + for v in bind_group.used.samplers.used_resources() { + self.suspected_resources + .samplers + .insert(v.as_info().id(), v.clone()); + } - let submit_index = bind_group.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .bind_groups - .insert(bind_group_id, bind_group.clone()); + self.suspected_resources + .bind_group_layouts + .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + + let submit_index = bind_group.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .bind_groups + .insert(bind_group_id, bind_group.clone()); !is_removed }); submit_indices @@ -502,32 +494,28 @@ impl LifetimeTracker { self.suspected_resources .texture_views .retain(|&view_id, view| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .views - .remove_abandoned(view_id, hub.texture_views.contains(view_id)) - }; - if is_removed { - log::info!("TextureView {:?} is not tracked anymore", view_id); - f(&view_id); - - if let Some(parent_texture) = view.parent.as_ref() { - self.suspected_resources - .textures - .insert(parent_texture.as_info().id(), parent_texture.clone()); - } - let submit_index = view.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .texture_views - .insert(view_id, view.clone()); + let mut trackers = trackers.lock(); + let is_removed = trackers + .views + .remove_abandoned(view_id, hub.texture_views.contains(view_id)); + + f(&view_id); + + if let Some(parent_texture) = view.parent.as_ref() { + self.suspected_resources + .textures + .insert(parent_texture.as_info().id(), parent_texture.clone()); } + let submit_index = view.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .texture_views + .insert(view_id, view.clone()); !is_removed }); submit_indices @@ -545,37 +533,33 @@ impl LifetimeTracker { self.suspected_resources .textures .retain(|&texture_id, texture| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .textures - .remove_abandoned(texture_id, hub.textures.contains(texture_id)) - }; - if is_removed { - log::info!("Texture {:?} is not tracked anymore", texture_id); - f(&texture_id); - - let submit_index = texture.info.submission_index(); - let non_referenced_resources = self - .active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources); - - if let &resource::TextureClearMode::RenderPass { - ref clear_views, .. 
- } = &*texture.clear_mode.read() - { - clear_views.into_iter().for_each(|v| { - non_referenced_resources - .texture_views - .insert(v.as_info().id(), v.clone()); - }); - } - non_referenced_resources - .textures - .insert(texture_id, texture.clone()); + let mut trackers = trackers.lock(); + let is_removed = trackers + .textures + .remove_abandoned(texture_id, hub.textures.contains(texture_id)); + + f(&texture_id); + + let submit_index = texture.info.submission_index(); + let non_referenced_resources = self + .active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources); + + if let &resource::TextureClearMode::RenderPass { + ref clear_views, .. + } = &*texture.clear_mode.read() + { + clear_views.into_iter().for_each(|v| { + non_referenced_resources + .texture_views + .insert(v.as_info().id(), v.clone()); + }); } + non_referenced_resources + .textures + .insert(texture_id, texture.clone()); !is_removed }); self @@ -594,27 +578,23 @@ impl LifetimeTracker { self.suspected_resources .samplers .retain(|&sampler_id, sampler| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .samplers - .remove_abandoned(sampler_id, hub.samplers.contains(sampler_id)) - }; - if is_removed { - log::info!("Sampler {:?} is not tracked anymore", sampler_id); - f(&sampler_id); + let mut trackers = trackers.lock(); + let is_removed = trackers + .samplers + .remove_abandoned(sampler_id, hub.samplers.contains(sampler_id)); - let submit_index = sampler.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .samplers - .insert(sampler_id, sampler.clone()); + f(&sampler_id); + + let submit_index = sampler.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .samplers + .insert(sampler_id, sampler.clone()); !is_removed }); submit_indices @@ -633,35 +613,31 @@ impl LifetimeTracker { self.suspected_resources .buffers .retain(|&buffer_id, buffer| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .buffers - .remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)) - }; - if is_removed { - log::info!("Buffer {:?} is not tracked anymore", buffer_id); - f(&buffer_id); + let mut trackers = trackers.lock(); + let is_removed = trackers + .buffers + .remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)); - let submit_index = buffer.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - if let resource::BufferMapState::Init { - ref stage_buffer, .. - } = *buffer.map_state.lock() - { - self.free_resources - .buffers - .insert(stage_buffer.as_info().id(), stage_buffer.clone()); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) + f(&buffer_id); + + let submit_index = buffer.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); + } + if let resource::BufferMapState::Init { + ref stage_buffer, .. 
+ } = *buffer.map_state.lock() + { + self.free_resources .buffers - .insert(buffer_id, buffer.clone()); + .insert(stage_buffer.as_info().id(), stage_buffer.clone()); } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .buffers + .insert(buffer_id, buffer.clone()); !is_removed }); submit_indices @@ -679,36 +655,29 @@ impl LifetimeTracker { let mut submit_indices = Vec::new(); self.suspected_resources.compute_pipelines.retain( |&compute_pipeline_id, compute_pipeline| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers.compute_pipelines.remove_abandoned( - compute_pipeline_id, - hub.compute_pipelines.contains(compute_pipeline_id), - ) - }; - if is_removed { - log::info!( - "ComputePipeline {:?} is not tracked anymore", - compute_pipeline_id - ); - f(&compute_pipeline_id); - - self.suspected_resources.pipeline_layouts.insert( - compute_pipeline.layout.as_info().id(), - compute_pipeline.layout.clone(), - ); - - let submit_index = compute_pipeline.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .compute_pipelines - .insert(compute_pipeline_id, compute_pipeline.clone()); + let mut trackers = trackers.lock(); + let is_removed = trackers.compute_pipelines.remove_abandoned( + compute_pipeline_id, + hub.compute_pipelines.contains(compute_pipeline_id), + ); + + f(&compute_pipeline_id); + + self.suspected_resources.pipeline_layouts.insert( + compute_pipeline.layout.as_info().id(), + compute_pipeline.layout.clone(), + ); + + let submit_index = compute_pipeline.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); } + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .compute_pipelines + .insert(compute_pipeline_id, compute_pipeline.clone()); !is_removed }, ); @@ -728,46 +697,35 @@ impl LifetimeTracker { self.suspected_resources .render_pipelines .retain(|&render_pipeline_id, render_pipeline| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers.render_pipelines.remove_abandoned( - render_pipeline_id, - hub.render_pipelines.contains(render_pipeline_id), - ) - }; - if is_removed { - log::info!( - "RenderPipeline {:?} is not tracked anymore", - render_pipeline_id - ); - f(&render_pipeline_id); - - self.suspected_resources.pipeline_layouts.insert( - render_pipeline.layout.as_info().id(), - render_pipeline.layout.clone(), - ); - - let submit_index = render_pipeline.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .render_pipelines - .insert(render_pipeline_id, render_pipeline.clone()); + let mut trackers = trackers.lock(); + let is_removed = trackers.render_pipelines.remove_abandoned( + render_pipeline_id, + hub.render_pipelines.contains(render_pipeline_id), + ); + + f(&render_pipeline_id); + + self.suspected_resources.pipeline_layouts.insert( + render_pipeline.layout.as_info().id(), + render_pipeline.layout.clone(), + ); + + let submit_index = render_pipeline.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); } + 
self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .render_pipelines + .insert(render_pipeline_id, render_pipeline.clone()); !is_removed }); submit_indices } - fn triage_suspected_pipeline_layouts( - &mut self, - pipeline_submit_indices: &[u64], - mut f: F, - ) -> &mut Self + fn triage_suspected_pipeline_layouts(&mut self, mut f: F) -> &mut Self where F: FnMut(&id::PipelineLayoutId), { @@ -775,63 +733,22 @@ impl LifetimeTracker { .pipeline_layouts .retain(|pipeline_layout_id, pipeline_layout| { //Note: this has to happen after all the suspected pipelines are destroyed + f(pipeline_layout_id); - let mut num_ref_in_nonreferenced_resources = 0; - pipeline_submit_indices.iter().for_each(|submit_index| { - let resources = self - .active - .iter() - .find(|a| a.index == *submit_index) - .map_or(&self.free_resources, |a| &a.last_resources); - - resources.compute_pipelines.iter().for_each(|(_id, p)| { - if p.layout.as_info().id() == *pipeline_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - resources.render_pipelines.iter().for_each(|(_id, p)| { - if p.layout.as_info().id() == *pipeline_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - }); - - if pipeline_layout.ref_count() == (1 + num_ref_in_nonreferenced_resources) { - log::debug!( - "PipelineLayout {:?} is not tracked anymore", - pipeline_layout_id - ); - - f(pipeline_layout_id); - - for bgl in &pipeline_layout.bind_group_layouts { - self.suspected_resources - .bind_group_layouts - .insert(bgl.as_info().id(), bgl.clone()); - } - self.free_resources - .pipeline_layouts - .insert(*pipeline_layout_id, pipeline_layout.clone()); - - return false; - } else { - log::info!( - "PipelineLayout {:?} is still referenced from {}", - pipeline_layout_id, - pipeline_layout.ref_count() - ); + for bgl in &pipeline_layout.bind_group_layouts { + self.suspected_resources + .bind_group_layouts + .insert(bgl.as_info().id(), bgl.clone()); } - true + self.free_resources + .pipeline_layouts + .insert(*pipeline_layout_id, pipeline_layout.clone()); + false }); self } - fn triage_suspected_bind_group_layouts( - &mut self, - bind_group_submit_indices: &[u64], - pipeline_submit_indices: &[u64], - mut f: F, - ) -> &mut Self + fn triage_suspected_bind_group_layouts(&mut self, mut f: F) -> &mut Self where F: FnMut(&id::BindGroupLayoutId), { @@ -841,80 +758,12 @@ impl LifetimeTracker { //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. 
- let mut num_ref_in_nonreferenced_resources = 0; - bind_group_submit_indices.iter().for_each(|submit_index| { - let resources = self - .active - .iter() - .find(|a| a.index == *submit_index) - .map_or(&self.free_resources, |a| &a.last_resources); - - resources.bind_groups.iter().for_each(|(_id, b)| { - if b.layout.as_info().id() == *bind_group_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - resources.bind_group_layouts.iter().for_each(|(id, _b)| { - if id == bind_group_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - }); - pipeline_submit_indices.iter().for_each(|submit_index| { - let resources = self - .active - .iter() - .find(|a| a.index == *submit_index) - .map_or(&self.free_resources, |a| &a.last_resources); - - resources.compute_pipelines.iter().for_each(|(_id, p)| { - p.layout.bind_group_layouts.iter().for_each(|b| { - if b.as_info().id() == *bind_group_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - }); - resources.render_pipelines.iter().for_each(|(_id, p)| { - p.layout.bind_group_layouts.iter().for_each(|b| { - if b.as_info().id() == *bind_group_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - }); - resources.pipeline_layouts.iter().for_each(|(_id, p)| { - p.bind_group_layouts.iter().for_each(|b| { - if b.as_info().id() == *bind_group_layout_id { - num_ref_in_nonreferenced_resources += 1; - } - }); - }); - }); - - //Note: this has to happen after all the suspected pipelines are destroyed - if bind_group_layout.ref_count() == (1 + num_ref_in_nonreferenced_resources) { - // If This layout points to a compatible one, go over the latter - // to decrement the ref count and potentially destroy it. - //bgl_to_check = bind_group_layout.compatible_layout; - - log::debug!( - "BindGroupLayout {:?} is not tracked anymore", - bind_group_layout_id - ); - f(bind_group_layout_id); + f(bind_group_layout_id); - self.free_resources - .bind_group_layouts - .insert(*bind_group_layout_id, bind_group_layout.clone()); - - return false; - } else { - log::info!( - "BindGroupLayout {:?} is still referenced from {}", - bind_group_layout_id, - bind_group_layout.ref_count() - ); - } - true + self.free_resources + .bind_group_layouts + .insert(*bind_group_layout_id, bind_group_layout.clone()); + false }, ); self @@ -929,28 +778,23 @@ impl LifetimeTracker { self.suspected_resources .query_sets .retain(|&query_set_id, query_set| { - let is_removed = { - let mut trackers = trackers.lock(); - trackers - .query_sets - .remove_abandoned(query_set_id, hub.query_sets.contains(query_set_id)) - }; - if is_removed { - log::info!("QuerySet {:?} is not tracked anymore", query_set_id); - // #[cfg(feature = "trace")] - // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); - - let submit_index = query_set.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .query_sets - .insert(query_set_id, query_set.clone()); + let mut trackers = trackers.lock(); + let is_removed = trackers + .query_sets + .remove_abandoned(query_set_id, hub.query_sets.contains(query_set_id)); + // #[cfg(feature = "trace")] + // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); + + let submit_index = query_set.info.submission_index(); + if !submit_indices.contains(&submit_index) { + submit_indices.push(submit_index); } + self.active + .iter_mut() + .find(|a| a.index 
== submit_index) + .map_or(&mut self.free_resources, |a| &mut a.last_resources) + .query_sets + .insert(query_set_id, query_set.clone()); !is_removed }); submit_indices @@ -1009,45 +853,36 @@ impl LifetimeTracker { t.add(trace::Action::DestroyRenderBundle(*_id)); } }); - let compute_pipeline_indices = - self.triage_suspected_compute_pipelines(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyComputePipeline(*_id)); - } - }); - let render_pipeline_indices = - self.triage_suspected_render_pipelines(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderPipeline(*_id)); - } - }); - let mut pipeline_submit_indices = Vec::new(); - pipeline_submit_indices.extend(compute_pipeline_indices); - pipeline_submit_indices.extend(render_pipeline_indices); - let bind_group_submit_indices = self.triage_suspected_bind_groups(hub, trackers, |_id| { + self.triage_suspected_compute_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyComputePipeline(*_id)); + } + }); + self.triage_suspected_render_pipelines(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyRenderPipeline(*_id)); + } + }); + self.triage_suspected_bind_groups(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyBindGroup(*_id)); } }); - self.triage_suspected_pipeline_layouts(&pipeline_submit_indices, |_id| { + self.triage_suspected_pipeline_layouts(|_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroyPipelineLayout(*_id)); } }); - self.triage_suspected_bind_group_layouts( - &bind_group_submit_indices, - &pipeline_submit_indices, - |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroupLayout(*_id)); - } - }, - ); + self.triage_suspected_bind_group_layouts(|_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBindGroupLayout(*_id)); + } + }); self.triage_suspected_samplers(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { diff --git a/wgpu-core/src/device/mod.rs b/wgpu-core/src/device/mod.rs index 1836dd646f..ae53e83f1d 100644 --- a/wgpu-core/src/device/mod.rs +++ b/wgpu-core/src/device/mod.rs @@ -184,8 +184,10 @@ impl UserClosures { // Mappings _must_ be fired before submissions, as the spec requires all mapping callbacks that are registered before // a on_submitted_work_done callback to be fired before the on_submitted_work_done callback. 
- for (operation, status) in self.mappings { - operation.callback.call(status); + for (mut operation, status) in self.mappings { + if let Some(callback) = operation.callback.take() { + callback.call(status); + } } for closure in self.submissions { closure.call(); diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 8f103a9ae6..9b6cd33fb4 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -794,8 +794,8 @@ impl Global { dst.info .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); - let dst_raw = dst - .inner + let dst_inner = dst.inner(); + let dst_raw = dst_inner .as_ref() .unwrap() .as_raw() @@ -880,7 +880,9 @@ impl Global { .set_single(&dst, selector, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(&dst))); + encoder.transition_textures( + transition.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())), + ); encoder.transition_buffers(iter::once(barrier)); encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions); } @@ -1211,7 +1213,7 @@ impl Global { } for texture in cmd_buf_trackers.textures.used_resources() { let id = texture.info.id(); - let should_extend = match *texture.inner.as_ref().unwrap() { + let should_extend = match *texture.inner().as_ref().unwrap() { TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id)); } @@ -1364,7 +1366,11 @@ impl Global { trackers .textures .set_from_usage_scope(&used_surface_textures); - let texture_barriers = trackers.textures.drain_transitions(); + let (transitions, textures) = trackers.textures.drain_transitions(); + let texture_barriers = transitions + .into_iter() + .enumerate() + .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); let present = unsafe { baked.encoder.transition_textures(texture_barriers); baked.encoder.end_encoding().unwrap() @@ -1392,7 +1398,7 @@ impl Global { used_surface_textures.set_size(texture_guard.len()); for (&id, texture) in pending_writes.dst_textures.iter() { - match *texture.inner.as_ref().unwrap() { + match *texture.inner().as_ref().unwrap() { TextureInner::Native { raw: None } => { return Err(QueueSubmitError::DestroyedTexture(id)); } @@ -1414,8 +1420,11 @@ impl Global { trackers .textures .set_from_usage_scope(&used_surface_textures); - let texture_barriers = trackers.textures.drain_transitions(); - + let (transitions, textures) = trackers.textures.drain_transitions(); + let texture_barriers = transitions + .into_iter() + .enumerate() + .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); unsafe { pending_writes .command_encoder diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 105d8c7f62..de2a7c966d 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -534,9 +534,9 @@ impl Device { debug_assert_eq!(self_id.backend(), A::VARIANT); Texture { - inner: Some(resource::TextureInner::Native { + inner: RwLock::new(Some(resource::TextureInner::Native { raw: Some(hal_texture), - }), + })), device: self.clone(), desc: desc.map_label(|_| ()), hal_usage, @@ -768,8 +768,8 @@ impl Device { texture: &Arc>, desc: &resource::TextureViewDescriptor, ) -> Result, resource::CreateTextureViewError> { - let texture_raw = texture - .inner + let inner = texture.inner(); + let texture_raw = inner .as_ref() .unwrap() .as_raw() diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index 
4a88b05753..76db63472a 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -11,10 +11,7 @@ extract it from the hub. use std::{ borrow::Borrow, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + sync::atomic::{AtomicBool, Ordering}, }; #[cfg(feature = "trace")] @@ -203,11 +200,11 @@ impl Global { let mut presentation = surface.presentation.lock(); let present = presentation.as_mut().unwrap(); let texture = resource::Texture { - inner: Some(resource::TextureInner::Surface { - raw: ast.texture, + inner: RwLock::new(Some(resource::TextureInner::Surface { + raw: Some(ast.texture), parent_id: surface_id, has_work: AtomicBool::new(false), - }), + })), device: device.clone(), desc: texture_desc, hal_usage, @@ -310,35 +307,34 @@ impl Global { let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { - if let Ok(mut texture) = Arc::try_unwrap(texture) { - texture.clear_mode.write().destroy_clear_views(device.raw()); - - let suf = A::get_surface(&surface); - match texture.inner.take().unwrap() { - resource::TextureInner::Surface { - raw, - parent_id, - has_work, - } => { - if surface_id != parent_id { - log::error!("Presented frame is from a different surface"); - Err(hal::SurfaceError::Lost) - } else if !has_work.load(Ordering::Relaxed) { - log::error!("No work has been submitted for this frame"); - unsafe { suf.unwrap().raw.discard_texture(raw) }; - Err(hal::SurfaceError::Outdated) - } else { - unsafe { - queue.raw.as_ref().unwrap().present(&suf.unwrap().raw, raw) - } + let suf = A::get_surface(&surface); + let mut inner = texture.inner_mut(); + let inner = inner.as_mut().unwrap(); + + match *inner { + resource::TextureInner::Surface { + ref mut raw, + ref parent_id, + ref has_work, + } => { + if surface_id != *parent_id { + log::error!("Presented frame is from a different surface"); + Err(hal::SurfaceError::Lost) + } else if !has_work.load(Ordering::Relaxed) { + log::error!("No work has been submitted for this frame"); + unsafe { suf.unwrap().raw.discard_texture(raw.take().unwrap()) }; + Err(hal::SurfaceError::Outdated) + } else { + unsafe { + queue + .raw + .as_ref() + .unwrap() + .present(&suf.unwrap().raw, raw.take().unwrap()) } } - resource::TextureInner::Native { .. } => unreachable!(), } - } else { - Err(hal::SurfaceError::Other( - "Surface cannot be destroyed because is still in use", - )) + _ => unreachable!(), } } else { Err(hal::SurfaceError::Outdated) //TODO? @@ -402,26 +398,20 @@ impl Global { let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { - if let Ok(mut texture) = Arc::try_unwrap(texture) { - texture.clear_mode.write().destroy_clear_views(device.raw()); - - let suf = A::get_surface(&surface); - match texture.inner.take().unwrap() { - resource::TextureInner::Surface { - raw, - parent_id, - has_work: _, - } => { - if surface_id == parent_id { - unsafe { suf.unwrap().raw.discard_texture(raw) }; - } else { - log::warn!("Surface texture is outdated"); - } + let suf = A::get_surface(&surface); + match *texture.inner_mut().as_mut().take().as_mut().unwrap() { + &mut resource::TextureInner::Surface { + ref mut raw, + ref parent_id, + has_work: _, + } => { + if surface_id == *parent_id { + unsafe { suf.unwrap().raw.discard_texture(raw.take().unwrap()) }; + } else { + log::warn!("Surface texture is outdated"); } - resource::TextureInner::Native { .. 
} => unreachable!(), } - } else { - return Err(SurfaceError::StillReferenced); + _ => unreachable!(), } } } diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index bb28ce805e..5fcaff1c90 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -20,7 +20,7 @@ use crate::{ }; use hal::CommandEncoder; -use parking_lot::{Mutex, RwLock}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use smallvec::SmallVec; use thiserror::Error; @@ -242,7 +242,7 @@ unsafe impl Send for BufferMapCallbackC {} pub struct BufferMapCallback { // We wrap this so creating the enum in the C variant can be unsafe, // allowing our call function to be safe. - inner: Option, + inner: BufferMapCallbackInner, } #[cfg(any( @@ -279,7 +279,7 @@ impl Debug for BufferMapCallbackInner { impl BufferMapCallback { pub fn from_rust(callback: BufferMapCallbackCallback) -> Self { Self { - inner: Some(BufferMapCallbackInner::Rust { callback }), + inner: BufferMapCallbackInner::Rust { callback }, } } @@ -292,17 +292,17 @@ impl BufferMapCallback { /// invoked, which may happen at an unspecified time. pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self { Self { - inner: Some(BufferMapCallbackInner::C { inner }), + inner: BufferMapCallbackInner::C { inner }, } } - pub(crate) fn call(mut self, result: BufferAccessResult) { - match self.inner.take() { - Some(BufferMapCallbackInner::Rust { callback }) => { + pub(crate) fn call(self, result: BufferAccessResult) { + match self.inner { + BufferMapCallbackInner::Rust { callback } => { callback(result); } // SAFETY: the contract of the call to from_c says that this unsafe is sound. - Some(BufferMapCallbackInner::C { inner }) => unsafe { + BufferMapCallbackInner::C { inner } => unsafe { let status = match result { Ok(()) => BufferMapAsyncStatus::Success, Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, @@ -331,17 +331,6 @@ impl BufferMapCallback { (inner.callback)(status, inner.user_data); }, - None => { - panic!("Map callback invoked twice"); - } - } - } -} - -impl Drop for BufferMapCallback { - fn drop(&mut self) { - if self.inner.is_some() { - panic!("Map callback was leaked"); } } } @@ -349,7 +338,7 @@ impl Drop for BufferMapCallback { #[derive(Debug)] pub struct BufferMapOperation { pub host: HostMap, - pub callback: BufferMapCallback, + pub callback: Option, } #[derive(Clone, Debug, Error)] @@ -585,8 +574,10 @@ impl Buffer { } // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = map_closure { - operation.callback.call(status); + if let Some((mut operation, status)) = map_closure { + if let Some(callback) = operation.callback.take() { + callback.call(status); + } } Ok(()) @@ -688,7 +679,7 @@ pub(crate) enum TextureInner { raw: Option, }, Surface { - raw: A::SurfaceTexture, + raw: Option, parent_id: SurfaceId, has_work: AtomicBool, }, @@ -698,8 +689,10 @@ impl TextureInner { pub fn as_raw(&self) -> Option<&A::Texture> { match *self { Self::Native { raw: Some(ref tex) } => Some(tex), - Self::Native { raw: None } => None, - Self::Surface { ref raw, .. } => Some(raw.borrow()), + Self::Surface { + raw: Some(ref tex), .. 
+ } => Some(tex.borrow()), + _ => None, } } } @@ -720,20 +713,9 @@ pub enum TextureClearMode { None, } -impl TextureClearMode { - pub(crate) fn destroy_clear_views(&mut self, device: &A::Device) { - if let TextureClearMode::Surface { ref mut clear_view } = *self { - unsafe { - let view = clear_view.take().unwrap(); - hal::Device::destroy_texture_view(device, view); - } - } - } -} - #[derive(Debug)] pub struct Texture { - pub(crate) inner: Option>, + pub(crate) inner: RwLock>>, pub(crate) device: Arc>, pub(crate) desc: wgt::TextureDescriptor<(), Vec>, pub(crate) hal_usage: hal::TextureUses, @@ -768,10 +750,10 @@ impl Drop for Texture { } _ => {} }; - if self.inner.is_none() { + if self.inner.read().is_none() { return; } - let inner = self.inner.take().unwrap(); + let inner = self.inner.write().take().unwrap(); if let TextureInner::Native { raw: Some(raw) } = inner { unsafe { self.device.raw().destroy_texture(raw); @@ -781,6 +763,12 @@ impl Drop for Texture { } impl Texture { + pub(crate) fn inner<'a>(&'a self) -> RwLockReadGuard<'a, Option>> { + self.inner.read() + } + pub(crate) fn inner_mut<'a>(&'a self) -> RwLockWriteGuard<'a, Option>> { + self.inner.write() + } pub(crate) fn get_clear_view<'a>( clear_mode: &'a TextureClearMode, desc: &'a wgt::TextureDescriptor<(), Vec>, @@ -824,9 +812,8 @@ impl Global { let hub = A::hub(self); let texture = { hub.textures.try_get(id).ok().flatten() }; - let hal_texture = texture - .as_ref() - .and_then(|tex| tex.inner.as_ref().unwrap().as_raw()); + let inner = texture.as_ref().unwrap().inner(); + let hal_texture = inner.as_ref().unwrap().as_raw(); hal_texture_callback(hal_texture); } diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 973684419b..a5c972283f 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -580,6 +580,7 @@ impl BufferTracker { let min_ref_count = if is_in_registry { 3 } else { 2 }; if existing_ref_count <= min_ref_count { self.metadata.remove(index); + log::info!("Buffer {:?} is not tracked anymore", id,); return true; } else { log::info!( diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index f51d736a1f..7d85712596 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -130,6 +130,8 @@ pub(crate) struct PendingTransition { pub usage: ops::Range, } +pub(crate) type PendingTransitionList = Vec>; + impl PendingTransition { /// Produce the hal barrier corresponding to the transition. pub fn into_hal<'a, A: HalApi>( @@ -148,14 +150,9 @@ impl PendingTransition { /// Produce the hal barrier corresponding to the transition. 
pub fn into_hal<'a, A: HalApi>( self, - tex: &'a resource::Texture, + tex: &'a resource::TextureInner, ) -> hal::TextureBarrier<'a, A> { - let texture = tex - .inner - .as_ref() - .unwrap() - .as_raw() - .expect("Texture is destroyed"); + let texture = tex.as_raw().expect("Texture is destroyed"); // These showing up in a barrier is always a bug strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN); diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index f565932f8b..3ed5e73415 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -189,6 +189,7 @@ impl> StatelessTracker { let min_ref_count = if is_in_registry { 3 } else { 2 }; if existing_ref_count <= min_ref_count { self.metadata.remove(index); + log::info!("{} {:?} is not tracked anymore", T::TYPE, id,); return true; } else { log::info!( diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index a90888a86d..146d0970cd 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -19,21 +19,22 @@ * will treat the contents as junk. !*/ -use super::{range::RangedStates, PendingTransition}; +use super::{range::RangedStates, PendingTransition, PendingTransitionList}; use crate::{ hal_api::HalApi, id::{TextureId, TypedId}, - resource::{Resource, Texture}, + resource::{Resource, Texture, TextureInner}, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, }; -use hal::{TextureBarrier, TextureUses}; +use hal::TextureUses; use arrayvec::ArrayVec; use naga::FastHashMap; +use parking_lot::RwLockReadGuard; use wgt::{strict_assert, strict_assert_eq}; use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain}; @@ -437,13 +438,24 @@ impl TextureTracker { self.metadata.owned_resources() } - /// Drains all currently pending transitions. - pub fn drain_transitions(&mut self) -> impl Iterator> { - let texture_barriers = self.temp.drain(..).map(|pending| { - let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; - pending.into_hal(tex) - }); - texture_barriers + /// Drain all currently pending transitions. + pub fn drain_transitions<'a>( + &'a mut self, + ) -> ( + PendingTransitionList, + Vec>>>, + ) { + let mut textures = Vec::new(); + let transitions = self + .temp + .drain(..) + .map(|pending| { + let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; + textures.push(tex.inner()); + pending + }) + .collect(); + (transitions, textures) } /// Inserts a single texture and a state into the resource tracker. 
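The reworked drain_transitions above returns two values instead of a single barrier iterator: the pending transitions themselves, plus the read guards for the affected textures. Holding the guards keeps each texture's inner RwLock locked for as long as the hal barriers borrow the raw textures. A minimal caller-side sketch, assuming a `trackers` lock guard and a hal `encoder` are in scope (it mirrors the queue.rs call sites elsewhere in this patch rather than introducing any new API):

    // `textures[i]` is a read guard over Option<TextureInner>; keeping the Vec
    // alive keeps the references handed to into_hal() valid while the barrier
    // iterator is consumed by the encoder.
    let (transitions, textures) = trackers.textures.drain_transitions();
    let barriers = transitions
        .into_iter()
        .enumerate()
        .map(|(i, pending)| pending.into_hal(textures[i].as_ref().unwrap()));
    unsafe { encoder.transition_textures(barriers) };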
@@ -707,6 +719,7 @@ impl TextureTracker { self.start_set.complex.remove(&index); self.end_set.complex.remove(&index); self.metadata.remove(index); + log::info!("Texture {:?} is not tracked anymore", id,); return true; } else { log::info!( diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 2273e4f884..12a057be92 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -1505,10 +1505,12 @@ impl crate::Context for Context { MapMode::Read => wgc::device::HostMap::Read, MapMode::Write => wgc::device::HostMap::Write, }, - callback: wgc::resource::BufferMapCallback::from_rust(Box::new(|status| { - let res = status.map_err(|_| crate::BufferAsyncError); - callback(res); - })), + callback: Some(wgc::resource::BufferMapCallback::from_rust(Box::new( + |status| { + let res = status.map_err(|_| crate::BufferAsyncError); + callback(res); + }, + ))), }; let global = &self.0; From 8ca3cb090346e910a96e4ecd6951ce87015c5780 Mon Sep 17 00:00:00 2001 From: gents83 Date: Tue, 19 Sep 2023 08:15:50 +0200 Subject: [PATCH 104/132] Fixing texture leaking due to clear view --- tests/tests/mem_leaks.rs | 34 ++++++++++++++++++---------------- wgpu-core/src/device/global.rs | 4 +++- wgpu-core/src/device/life.rs | 16 +++++++++++++--- wgpu-core/src/device/queue.rs | 8 +++++--- 4 files changed, 39 insertions(+), 23 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index d55e51fbe4..b1158f9a96 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -132,6 +132,8 @@ fn draw_test_with_reports( let report = global_report.hub_report(); assert_eq!(report.shader_modules.num_allocated, 1); assert_eq!(report.shader_modules.num_kept_from_user, 0); + assert_eq!(report.textures.num_allocated, 0); + assert_eq!(report.texture_views.num_allocated, 0); let texture = ctx.device.create_texture_with_data( &ctx.queue, @@ -219,22 +221,22 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); - assert_eq!(report.command_buffers.num_allocated, 1); assert_eq!(report.command_buffers.num_kept_from_user, 1); - assert_eq!(report.render_pipelines.num_allocated, 1); assert_eq!(report.render_pipelines.num_kept_from_user, 0); - assert_eq!(report.pipeline_layouts.num_allocated, 1); assert_eq!(report.pipeline_layouts.num_kept_from_user, 0); - assert_eq!(report.bind_group_layouts.num_allocated, 1); assert_eq!(report.bind_group_layouts.num_kept_from_user, 0); - assert_eq!(report.bind_groups.num_allocated, 1); assert_eq!(report.bind_groups.num_kept_from_user, 0); - assert_eq!(report.texture_views.num_allocated, 2); + assert_eq!(report.buffers.num_kept_from_user, 0); assert_eq!(report.texture_views.num_kept_from_user, 0); + assert_eq!(report.textures.num_kept_from_user, 0); + assert_eq!(report.command_buffers.num_allocated, 1); + assert_eq!(report.render_pipelines.num_allocated, 1); + assert_eq!(report.pipeline_layouts.num_allocated, 1); + assert_eq!(report.bind_group_layouts.num_allocated, 1); + assert_eq!(report.bind_groups.num_allocated, 1); assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.buffers.num_kept_from_user, 0); + assert_eq!(report.texture_views.num_allocated, 2); assert_eq!(report.textures.num_allocated, 1); - assert_eq!(report.textures.num_kept_from_user, 0); ctx.queue.submit(Some(encoder.finish())); @@ -251,9 +253,9 @@ fn draw_test_with_reports( assert_eq!(report.bind_groups.num_allocated, 0); assert_eq!(report.bind_group_layouts.num_allocated, 0); 
assert_eq!(report.pipeline_layouts.num_allocated, 0); - //surface is still there - assert_eq!(report.texture_views.num_allocated, 1); - assert_eq!(report.textures.num_allocated, 1); + assert_eq!(report.texture_views.num_allocated, 0); + assert_eq!(report.textures.num_allocated, 0); + assert_eq!(report.buffers.num_allocated, 0); drop(ctx.queue); drop(ctx.device); @@ -263,13 +265,13 @@ fn draw_test_with_reports( let report = global_report.hub_report(); assert_eq!(report.queues.num_kept_from_user, 0); - assert_eq!(report.queues.num_allocated, 0); - //Still one texture alive because surface is not dropped till the end - assert_eq!(report.textures.num_allocated, 1); - //that is keeping still the device alive - assert_eq!(report.devices.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); assert_eq!(report.devices.num_kept_from_user, 0); + assert_eq!(report.queues.num_allocated, 0); + assert_eq!(report.buffers.num_allocated, 0); + assert_eq!(report.textures.num_allocated, 0); + assert_eq!(report.texture_views.num_allocated, 0); + assert_eq!(report.devices.num_allocated, 0); } #[test] diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 08a31ea685..831f755aea 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -735,7 +735,9 @@ impl Global { resource::TextureClearMode::None, ) { resource::TextureClearMode::BufferCopy => SmallVec::new(), - resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views, + resource::TextureClearMode::RenderPass { + mut clear_views, .. + } => clear_views.drain(..).collect(), resource::TextureClearMode::Surface { mut clear_view } => { if let Some(view) = clear_view.take() { unsafe { diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 22f0b2a5da..854b25560e 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -328,6 +328,15 @@ impl LifetimeTracker { pub fn cleanup(&mut self) { profiling::scope!("LifetimeTracker::cleanup"); + self.free_resources.textures.iter().for_each(|(_, t)| { + if let &mut resource::TextureClearMode::RenderPass { + ref mut clear_views, + .. + } = &mut *t.clear_mode.write() + { + clear_views.clear(); + } + }); self.free_resources.clear(); } @@ -547,9 +556,10 @@ impl LifetimeTracker { .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); - if let &resource::TextureClearMode::RenderPass { - ref clear_views, .. - } = &*texture.clear_mode.read() + if let &mut resource::TextureClearMode::RenderPass { + ref mut clear_views, + .. 
+ } = &mut *texture.clear_mode.write() { clear_views.into_iter().for_each(|v| { non_referenced_resources diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 9b6cd33fb4..a0aa14114b 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1059,8 +1059,8 @@ impl Global { dst.info .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); - let dst_raw = dst - .inner + let dst_inner = dst.inner(); + let dst_raw = dst_inner .as_ref() .unwrap() .as_raw() @@ -1083,7 +1083,9 @@ impl Global { .textures .set_single(&dst, selector, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; - encoder.transition_textures(transitions.map(|pending| pending.into_hal(&dst))); + encoder.transition_textures( + transitions.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())), + ); encoder.copy_external_image_to_texture( source, dst_raw, From d1fe60c955111cc1dd637339a8fb88b1838fc423 Mon Sep 17 00:00:00 2001 From: gents83 Date: Tue, 19 Sep 2023 08:34:33 +0200 Subject: [PATCH 105/132] QuerySet error drop not failing --- tests/tests/query_set.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/tests/query_set.rs b/tests/tests/query_set.rs index 16e5094089..d2592908ff 100644 --- a/tests/tests/query_set.rs +++ b/tests/tests/query_set.rs @@ -1,11 +1,8 @@ -use wgpu_test::{initialize_test, FailureCase, TestParameters}; +use wgpu_test::{initialize_test, TestParameters}; #[test] fn drop_failed_timestamp_query_set() { - let parameters = TestParameters::default() - // https://github.com/gfx-rs/wgpu/issues/4139 - .expect_fail(FailureCase::always()); - initialize_test(parameters, |ctx| { + initialize_test(TestParameters::default(), |ctx| { // Enter an error scope, so the validation catch-all doesn't // report the error too early. 
ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); From fb13cc8110c4060bc48d64b38c9b9d62e42a627b Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 21 Sep 2023 20:09:29 +0200 Subject: [PATCH 106/132] Better resource triage_suspected --- tests/tests/mem_leaks.rs | 14 ++--- wgpu-core/src/command/bundle.rs | 24 ++++++--- wgpu-core/src/command/render.rs | 16 +++--- wgpu-core/src/device/global.rs | 92 +++++--------------------------- wgpu-core/src/device/life.rs | 86 +++++++++++------------------ wgpu-core/src/device/queue.rs | 13 ++--- wgpu-core/src/device/resource.rs | 53 ++++++++++++++++-- wgpu-core/src/resource.rs | 14 +++-- wgpu-core/src/track/buffer.rs | 46 ++++++++++------ wgpu-core/src/track/metadata.rs | 19 ++++++- wgpu-core/src/track/mod.rs | 58 ++++++++++++-------- wgpu-core/src/track/stateless.rs | 45 ++++++++++++---- wgpu-core/src/track/texture.rs | 48 +++++++++++------ 13 files changed, 289 insertions(+), 239 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index b1158f9a96..af7eb1766c 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -158,8 +158,7 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.buffers.num_allocated, 1); - //1 is clear_view and 1 is user's texture_view - assert_eq!(report.texture_views.num_allocated, 2); + assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.textures.num_allocated, 1); drop(texture); @@ -167,7 +166,7 @@ fn draw_test_with_reports( let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.texture_views.num_allocated, 2); + assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.texture_views.num_kept_from_user, 1); assert_eq!(report.textures.num_allocated, 1); assert_eq!(report.textures.num_kept_from_user, 0); @@ -206,7 +205,7 @@ fn draw_test_with_reports( assert_eq!(report.compute_pipelines.num_allocated, 0); assert_eq!(report.command_buffers.num_allocated, 1); assert_eq!(report.render_bundles.num_allocated, 0); - assert_eq!(report.texture_views.num_allocated, 2); + assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.textures.num_allocated, 1); function(&mut rpass); @@ -235,16 +234,17 @@ fn draw_test_with_reports( assert_eq!(report.bind_group_layouts.num_allocated, 1); assert_eq!(report.bind_groups.num_allocated, 1); assert_eq!(report.buffers.num_allocated, 1); - assert_eq!(report.texture_views.num_allocated, 2); + assert_eq!(report.texture_views.num_allocated, 1); assert_eq!(report.textures.num_allocated, 1); - ctx.queue.submit(Some(encoder.finish())); + let submit_index = ctx.queue.submit(Some(encoder.finish())); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.command_buffers.num_allocated, 0); - ctx.device.poll(wgpu::Maintain::Wait); + ctx.device + .poll(wgpu::Maintain::WaitForSubmissionIndex(submit_index)); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 0da78e04bd..4bc47c7414 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -295,6 +295,7 @@ impl RenderBundleEncoder { let bind_group = state .trackers .bind_groups + .write() .add_single(&*bind_group_guard, bind_group_id) 
.ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) .map_pass_err(scope)?; @@ -360,6 +361,7 @@ impl RenderBundleEncoder { let pipeline = state .trackers .render_pipelines + .write() .add_single(&*pipeline_guard, pipeline_id) .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; @@ -402,6 +404,7 @@ impl RenderBundleEncoder { let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) .map_pass_err(scope)?; self.check_valid_to_use(buffer.device.info.id()) @@ -430,6 +433,7 @@ impl RenderBundleEncoder { let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) .map_pass_err(scope)?; self.check_valid_to_use(buffer.device.info.id()) @@ -565,6 +569,7 @@ impl RenderBundleEncoder { let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; self.check_valid_to_use(buffer.device.info.id()) @@ -603,6 +608,7 @@ impl RenderBundleEncoder { let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; self.check_valid_to_use(buffer.device.info.id()) @@ -780,7 +786,8 @@ impl RenderBundle { num_dynamic_offsets, bind_group_id, } => { - let bind_group = trackers.bind_groups.get(bind_group_id).unwrap(); + let bind_groups = trackers.bind_groups.read(); + let bind_group = bind_groups.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( pipeline_layout.as_ref().unwrap().raw(), @@ -792,7 +799,8 @@ impl RenderBundle { offsets = &offsets[num_dynamic_offsets as usize..]; } RenderCommand::SetPipeline(pipeline_id) => { - let pipeline = trackers.render_pipelines.get(pipeline_id).unwrap(); + let render_pipelines = trackers.render_pipelines.read(); + let pipeline = render_pipelines.get(pipeline_id).unwrap(); unsafe { raw.set_render_pipeline(pipeline.raw()) }; pipeline_layout = Some(pipeline.layout.clone()); @@ -803,7 +811,8 @@ impl RenderBundle { offset, size, } => { - let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -817,7 +826,8 @@ impl RenderBundle { offset, size, } => { - let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -895,7 +905,8 @@ impl RenderBundle { count: None, indexed: false, } => { - let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { @@ -904,7 +915,8 @@ impl RenderBundle { count: None, indexed: true, } => { - let buffer = trackers.buffers.get(buffer_id).unwrap().raw(); + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indexed_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { .. 
} diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 08dd5806be..cbb657f548 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -693,7 +693,7 @@ struct RenderAttachment<'a, A: HalApi> { impl TextureView { fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { RenderAttachment { - texture: self.parent.as_ref().unwrap().clone(), + texture: self.parent.read().as_ref().unwrap().clone(), selector: &self.selector, usage, } @@ -728,7 +728,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if channel.load_op == LoadOp::Load { pending_discard_init_fixups.extend(texture_memory_actions.register_init_action( &TextureInitTrackerAction { - texture: view.parent.as_ref().unwrap().clone(), + texture: view.parent.read().as_ref().unwrap().clone(), range: TextureInitRange::from(view.selector.clone()), // Note that this is needed even if the target is discarded, kind: MemoryInitKind::NeedsInitializedMemory, @@ -737,7 +737,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if channel.store_op == StoreOp::Store { // Clear + Store texture_memory_actions.register_implicit_init( - view.parent.as_ref().unwrap(), + view.parent.read().as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), ); } @@ -746,7 +746,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // discard right away be alright since the texture can't be used // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { - texture: view.parent.as_ref().unwrap().clone(), + texture: view.parent.read().as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -915,7 +915,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if need_init_beforehand { pending_discard_init_fixups.extend( texture_memory_actions.register_init_action(&TextureInitTrackerAction { - texture: view.parent.as_ref().unwrap().clone(), + texture: view.parent.read().as_ref().unwrap().clone(), range: TextureInitRange::from(view.selector.clone()), kind: MemoryInitKind::NeedsInitializedMemory, }), @@ -933,7 +933,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { if at.depth.store_op != at.stencil.store_op { if !need_init_beforehand { texture_memory_actions.register_implicit_init( - view.parent.as_ref().unwrap(), + view.parent.read().as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), ); } @@ -948,7 +948,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if at.depth.store_op == StoreOp::Discard { // Both are discarded using the regular path. 
discarded_surfaces.push(TextureSurfaceDiscard { - texture: view.parent.as_ref().unwrap().clone(), + texture: view.parent.read().as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -1074,7 +1074,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } texture_memory_actions.register_implicit_init( - resolve_view.parent.as_ref().unwrap(), + resolve_view.parent.read().as_ref().unwrap(), TextureInitRange::from(resolve_view.selector.clone()), ); render_attachments diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 831f755aea..8a469a40be 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -18,7 +18,6 @@ use crate::{ use hal::Device as _; use parking_lot::RwLock; -use smallvec::SmallVec; use wgt::{BufferAddress, TextureFormat}; @@ -529,48 +528,6 @@ impl Global { let (id, resource) = fid.assign(texture); log::info!("Created Texture {:?} with {:?}", id, desc); - let format_features = device - .describe_format_features(&device.adapter, desc.format) - .unwrap(); - let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features); - if hal_usage - .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) - { - let is_color = !desc.format.is_depth_stencil_format(); - let dimension = match desc.dimension { - wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, - wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, - wgt::TextureDimension::D3 => unreachable!(), - }; - - let mut clear_views = SmallVec::new(); - for mip_level in 0..desc.mip_level_count { - for array_layer in 0..desc.size.depth_or_array_layers { - let desc = resource::TextureViewDescriptor { - label: Some(Cow::Borrowed("(wgpu internal) clear texture view")), - format: Some(desc.format), - dimension: Some(dimension), - range: wgt::ImageSubresourceRange { - aspect: wgt::TextureAspect::All, - base_mip_level: mip_level, - mip_level_count: Some(1), - base_array_layer: array_layer, - array_layer_count: Some(1), - }, - }; - let view = device.create_texture_view(&resource, &desc).unwrap(); - let view_fid = hub.texture_views.request::(); - let view = view_fid.init(view); - clear_views.push(view); - } - } - let mut clear_mode = resource.clear_mode.write(); - *clear_mode = resource::TextureClearMode::RenderPass { - clear_views, - is_color, - }; - } - device.trackers.lock().textures.insert_single( id, resource, @@ -730,45 +687,20 @@ impl Global { let last_submit_index = texture.info.submission_index(); - let mut clear_views = match std::mem::replace( - &mut *texture.clear_mode.write(), - resource::TextureClearMode::None, - ) { - resource::TextureClearMode::BufferCopy => SmallVec::new(), - resource::TextureClearMode::RenderPass { - mut clear_views, .. 
- } => clear_views.drain(..).collect(), - resource::TextureClearMode::Surface { mut clear_view } => { - if let Some(view) = clear_view.take() { - unsafe { - use hal::Device; - device.raw().destroy_texture_view(view); - } - } - SmallVec::new() - } - resource::TextureClearMode::None => SmallVec::new(), - }; - - match *texture.inner().as_ref().unwrap() { - resource::TextureInner::Native { ref raw } => { - if !raw.is_none() { - let temp = queue::TempResource::Texture(texture.clone(), clear_views); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); - if pending_writes.dst_textures.contains_key(&texture_id) { - pending_writes.temp_resources.push(temp); - } else { - device - .lock_life() - .schedule_resource_destruction(temp, last_submit_index); - } + if let resource::TextureInner::Native { ref raw } = *texture.inner().as_ref().unwrap() { + if !raw.is_none() { + let temp = queue::TempResource::Texture(texture.clone()); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_textures.contains_key(&texture_id) { + pending_writes.temp_resources.push(temp); } else { - return Err(resource::DestroyError::AlreadyDestroyed); + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); } - } - resource::TextureInner::Surface { .. } => { - clear_views.clear(); + } else { + return Err(resource::DestroyError::AlreadyDestroyed); } } diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 854b25560e..12f56d1ceb 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -56,18 +56,18 @@ impl ResourceMaps { } } pub(crate) fn clear(&mut self) { - self.buffers.clear(); - self.staging_buffers.clear(); - self.textures.clear(); - self.texture_views.clear(); - self.samplers.clear(); + self.render_bundles.clear(); self.bind_groups.clear(); self.compute_pipelines.clear(); self.render_pipelines.clear(); self.bind_group_layouts.clear(); self.pipeline_layouts.clear(); - self.render_bundles.clear(); + self.texture_views.clear(); + self.samplers.clear(); + self.staging_buffers.clear(); self.query_sets.clear(); + self.textures.clear(); + self.buffers.clear(); } pub(crate) fn extend(&mut self, other: Self) { @@ -242,11 +242,8 @@ impl LifetimeTracker { .staging_buffers .insert(raw.as_info().id(), raw); } - TempResource::Texture(raw, views) => { + TempResource::Texture(raw) => { last_resources.textures.insert(raw.as_info().id(), raw); - views.into_iter().for_each(|v| { - last_resources.texture_views.insert(v.as_info().id(), v); - }); } } } @@ -328,15 +325,6 @@ impl LifetimeTracker { pub fn cleanup(&mut self) { profiling::scope!("LifetimeTracker::cleanup"); - self.free_resources.textures.iter().for_each(|(_, t)| { - if let &mut resource::TextureClearMode::RenderPass { - ref mut clear_views, - .. 
- } = &mut *t.clear_mode.write() - { - clear_views.clear(); - } - }); self.free_resources.clear(); } @@ -357,10 +345,7 @@ impl LifetimeTracker { TempResource::StagingBuffer(raw) => { resources.staging_buffers.insert(raw.as_info().id(), raw); } - TempResource::Texture(raw, views) => { - views.into_iter().for_each(|v| { - resources.texture_views.insert(v.as_info().id(), v); - }); + TempResource::Texture(raw) => { resources.textures.insert(raw.as_info().id(), raw); } } @@ -400,27 +385,27 @@ impl LifetimeTracker { f(&bundle_id); - for v in bundle.used.buffers.used_resources() { + for v in bundle.used.buffers.write().drain_resources() { self.suspected_resources .buffers .insert(v.as_info().id(), v.clone()); } - for v in bundle.used.textures.used_resources() { + for v in bundle.used.textures.write().drain_resources() { self.suspected_resources .textures .insert(v.as_info().id(), v.clone()); } - for v in bundle.used.bind_groups.used_resources() { + for v in bundle.used.bind_groups.write().drain_resources() { self.suspected_resources .bind_groups .insert(v.as_info().id(), v.clone()); } - for v in bundle.used.render_pipelines.used_resources() { + for v in bundle.used.render_pipelines.write().drain_resources() { self.suspected_resources .render_pipelines .insert(v.as_info().id(), v.clone()); } - for v in bundle.used.query_sets.used_resources() { + for v in bundle.used.query_sets.write().drain_resources() { self.suspected_resources .query_sets .insert(v.as_info().id(), v.clone()); @@ -450,22 +435,22 @@ impl LifetimeTracker { f(&bind_group_id); - for v in bind_group.used.buffers.used_resources() { + for v in bind_group.used.buffers.drain_resources() { self.suspected_resources .buffers .insert(v.as_info().id(), v.clone()); } - for v in bind_group.used.textures.used_resources() { + for v in bind_group.used.textures.drain_resources() { self.suspected_resources .textures .insert(v.as_info().id(), v.clone()); } - for v in bind_group.used.views.used_resources() { + for v in bind_group.used.views.drain_resources() { self.suspected_resources .texture_views .insert(v.as_info().id(), v.clone()); } - for v in bind_group.used.samplers.used_resources() { + for v in bind_group.used.samplers.drain_resources() { self.suspected_resources .samplers .insert(v.as_info().id(), v.clone()); @@ -510,10 +495,13 @@ impl LifetimeTracker { f(&view_id); - if let Some(parent_texture) = view.parent.as_ref() { - self.suspected_resources - .textures - .insert(parent_texture.as_info().id(), parent_texture.clone()); + { + let mut lock = view.parent.write(); + if let Some(parent_texture) = lock.take() { + self.suspected_resources + .textures + .insert(parent_texture.as_info().id(), parent_texture); + } } let submit_index = view.info.submission_index(); if !submit_indices.contains(&submit_index) { @@ -555,18 +543,6 @@ impl LifetimeTracker { .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); - - if let &mut resource::TextureClearMode::RenderPass { - ref mut clear_views, - .. 
- } = &mut *texture.clear_mode.write() - { - clear_views.into_iter().for_each(|v| { - non_referenced_resources - .texture_views - .insert(v.as_info().id(), v.clone()); - }); - } non_referenced_resources .textures .insert(texture_id, texture.clone()); @@ -893,18 +869,13 @@ impl LifetimeTracker { t.add(trace::Action::DestroyBindGroupLayout(*_id)); } }); + self.triage_suspected_query_sets(hub, trackers); self.triage_suspected_samplers(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { t.add(trace::Action::DestroySampler(*_id)); } }); - self.triage_suspected_buffers(hub, trackers, |_id| { - #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBuffer(*_id)); - } - }); self.triage_suspected_texture_views(hub, trackers, |_id| { #[cfg(feature = "trace")] if let Some(ref mut t) = trace { @@ -917,7 +888,12 @@ impl LifetimeTracker { t.add(trace::Action::DestroyTexture(*_id)); } }); - self.triage_suspected_query_sets(hub, trackers); + self.triage_suspected_buffers(hub, trackers, |_id| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = trace { + t.add(trace::Action::DestroyBuffer(*_id)); + } + }); } /// Determine which buffers are ready to map, and which must wait for the diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index a0aa14114b..2247933acc 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -15,14 +15,14 @@ use crate::{ init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, resource::{ Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, StagingBuffer, Texture, - TextureInner, TextureView, + TextureInner, }, track, FastHashMap, SubmissionIndex, }; use hal::{CommandEncoder as _, Device as _, Queue as _}; use parking_lot::Mutex; -use smallvec::SmallVec; + use std::{ iter, mem, ptr, sync::{atomic::Ordering, Arc}, @@ -161,7 +161,7 @@ pub struct WrappedSubmissionIndex { pub enum TempResource { Buffer(Arc>), StagingBuffer(Arc>), - Texture(Arc>, SmallVec<[Arc>; 1]>), + Texture(Arc>), } /// A queue execution for a particular command encoder. @@ -1236,7 +1236,7 @@ impl Global { if should_extend { unsafe { used_surface_textures - .merge_single(texture, None, hal::TextureUses::PRESENT) + .merge_single(&texture, None, hal::TextureUses::PRESENT) .unwrap(); }; } @@ -1310,11 +1310,12 @@ impl Global { // We need to update the submission indices for the contained // state-less (!) resources as well, excluding the bind groups. // They don't get deleted too early if the bundle goes out of scope. - for render_pipeline in bundle.used.render_pipelines.used_resources() + for render_pipeline in + bundle.used.render_pipelines.read().used_resources() { render_pipeline.info.use_at(submit_index); } - for query_set in bundle.used.query_sets.used_resources() { + for query_set in bundle.used.query_sets.read().used_resources() { query_set.info.use_at(submit_index); } if bundle.is_unique() { diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index de2a7c966d..67a258b0b8 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -37,6 +37,7 @@ use arrayvec::ArrayVec; use hal::{CommandEncoder as _, Device as _}; use parking_lot::{Mutex, MutexGuard, RwLock}; +use smallvec::SmallVec; use thiserror::Error; use wgt::{TextureFormat, TextureSampleType, TextureViewDimension}; @@ -750,7 +751,50 @@ impl Device { .map_err(DeviceError::from)? 
}; - let clear_mode = resource::TextureClearMode::BufferCopy; + let clear_mode = if hal_usage + .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) + { + let (is_color, usage) = if desc.format.is_depth_stencil_format() { + (false, hal::TextureUses::DEPTH_STENCIL_WRITE) + } else { + (true, hal::TextureUses::COLOR_TARGET) + }; + let dimension = match desc.dimension { + wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1, + wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2, + wgt::TextureDimension::D3 => unreachable!(), + }; + + let mut clear_views = SmallVec::new(); + for mip_level in 0..desc.mip_level_count { + for array_layer in 0..desc.size.depth_or_array_layers { + let desc = hal::TextureViewDescriptor { + label: Some("(wgpu internal) clear texture view"), + format: desc.format, + dimension, + usage, + range: wgt::ImageSubresourceRange { + aspect: wgt::TextureAspect::All, + base_mip_level: mip_level, + mip_level_count: Some(1), + base_array_layer: array_layer, + array_layer_count: Some(1), + }, + }; + clear_views.push(Some( + unsafe { self.raw().create_texture_view(&raw_texture, &desc) } + .map_err(DeviceError::from)?, + )); + } + } + resource::TextureClearMode::RenderPass { + clear_views, + is_color, + } + } else { + resource::TextureClearMode::BufferCopy + }; + let mut texture = self.create_texture_from_hal( raw_texture, hal_usage, @@ -1049,7 +1093,7 @@ impl Device { Ok(TextureView { raw: Some(raw), - parent: Some(texture.clone()), + parent: RwLock::new(Some(texture.clone())), device: self.clone(), desc: resource::HalTextureViewDescriptor { format: resolved_format, @@ -1752,13 +1796,14 @@ impl Device { used: &mut BindGroupStates, used_texture_ranges: &mut Vec>, ) -> Result<(), binding_model::CreateBindGroupError> { - let texture_id = view.parent.as_ref().unwrap().as_info().id(); + let texture = view.parent.read(); + let texture_id = texture.as_ref().unwrap().as_info().id(); // Careful here: the texture may no longer have its own ref count, // if it was deleted by the user. let texture = used .textures .add_single( - view.parent.as_ref().unwrap(), + texture.as_ref().unwrap(), Some(view.selector.clone()), internal_use, ) diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 5fcaff1c90..e005bf7da3 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -702,7 +702,7 @@ pub enum TextureClearMode { BufferCopy, // View for clear via RenderPass for every subsurface (mip/layer/slice) RenderPass { - clear_views: SmallVec<[Arc>; 1]>, + clear_views: SmallVec<[Option; 1]>, is_color: bool, }, Surface { @@ -746,7 +746,13 @@ impl Drop for Texture { ref mut clear_views, .. } => { - clear_views.clear(); + clear_views.iter_mut().for_each(|clear_view| { + if let Some(view) = clear_view.take() { + unsafe { + self.device.raw().destroy_texture_view(view); + } + } + }); } _ => {} }; @@ -793,7 +799,7 @@ impl Texture { } else { mip_level * desc.size.depth_or_array_layers } + depth_or_layer; - clear_views[index as usize].raw() + clear_views[index as usize].as_ref().unwrap() } } } @@ -1024,7 +1030,7 @@ pub enum TextureViewNotRenderableReason { pub struct TextureView { pub(crate) raw: Option, // if it's a surface texture - it's none - pub(crate) parent: Option>>, + pub(crate) parent: RwLock>>>, pub(crate) device: Arc>, //TODO: store device_id for quick access? 
pub(crate) desc: HalTextureViewDescriptor, diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index a5c972283f..992b083ba1 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -19,6 +19,7 @@ use crate::{ }, }; use hal::{BufferBarrier, BufferUses}; +use parking_lot::Mutex; use wgt::{strict_assert, strict_assert_eq}; impl ResourceUses for BufferUses { @@ -43,14 +44,14 @@ impl ResourceUses for BufferUses { /// Stores all the buffers that a bind group stores. #[derive(Debug)] pub(crate) struct BufferBindGroupState { - buffers: Vec<(Arc>, BufferUses)>, + buffers: Mutex>, BufferUses)>>, _phantom: PhantomData, } impl BufferBindGroupState { pub fn new() -> Self { Self { - buffers: Vec::new(), + buffers: Mutex::new(Vec::new()), _phantom: PhantomData, } @@ -61,33 +62,43 @@ impl BufferBindGroupState { /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. #[allow(clippy::pattern_type_mismatch)] - pub(crate) fn optimize(&mut self) { - self.buffers - .sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0); + pub(crate) fn optimize(&self) { + let mut buffers = self.buffers.lock(); + buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0); } /// Returns a list of all buffers tracked. May contain duplicates. #[allow(clippy::pattern_type_mismatch)] pub fn used_ids(&self) -> impl Iterator + '_ { - self.buffers.iter().map(|(ref b, _)| b.as_info().id()) + let buffers = self.buffers.lock(); + buffers + .iter() + .map(|(ref b, _)| b.as_info().id()) + .collect::>() + .into_iter() } /// Returns a list of all buffers tracked. May contain duplicates. - #[allow(clippy::pattern_type_mismatch)] - pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.buffers.iter().map(|(ref buffer, _u)| buffer) + pub fn drain_resources(&self) -> impl Iterator>> + '_ { + let mut buffers = self.buffers.lock(); + buffers + .drain(..) + .map(|(buffer, _u)| buffer) + .collect::>() + .into_iter() } /// Adds the given resource with the given state. pub fn add_single<'a>( - &mut self, + &self, storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, ) -> Option<&'a Arc>> { let buffer = storage.get(id).ok()?; - self.buffers.push((buffer.clone(), state)); + let mut buffers = self.buffers.lock(); + buffers.push((buffer.clone(), state)); Some(buffer) } @@ -131,9 +142,11 @@ impl BufferUsageScope { } } - /// Returns a list of all buffers tracked. - pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.metadata.owned_resources() + /// Drains all buffers tracked. + pub fn drain_resources(&mut self) -> impl Iterator>> + '_ { + let resources = self.metadata.drain_resources(); + self.state.clear(); + resources.into_iter() } pub fn get(&self, id: BufferId) -> Option<&Arc>> { @@ -166,7 +179,8 @@ impl BufferUsageScope { &mut self, bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { - for &(ref resource, state) in &bind_group.buffers { + let buffers = bind_group.buffers.lock(); + for &(ref resource, state) in &*buffers { let index = resource.as_info().id().unzip().0 as usize; unsafe { @@ -314,7 +328,7 @@ impl BufferTracker { } /// Returns a list of all buffers tracked. 
- pub fn used_resources(&self) -> impl Iterator>> + '_ { + pub fn used_resources(&self) -> impl Iterator>> + '_ { self.metadata.owned_resources() } diff --git a/wgpu-core/src/track/metadata.rs b/wgpu-core/src/track/metadata.rs index 9d47aeccac..8001776e8c 100644 --- a/wgpu-core/src/track/metadata.rs +++ b/wgpu-core/src/track/metadata.rs @@ -133,16 +133,31 @@ impl> ResourceMetadata { } /// Returns an iterator over the resources owned by `self`. - pub(super) fn owned_resources(&self) -> impl Iterator> + '_ { + pub(super) fn owned_resources(&self) -> impl Iterator> + '_ { if !self.owned.is_empty() { self.tracker_assert_in_bounds(self.owned.len() - 1) }; iterate_bitvec_indices(&self.owned).map(move |index| { let resource = unsafe { self.resources.get_unchecked(index) }; - resource.as_ref().unwrap() + resource.as_ref().unwrap().clone() }) } + /// Returns an iterator over the resources owned by `self`. + pub(super) fn drain_resources(&mut self) -> Vec> { + if !self.owned.is_empty() { + self.tracker_assert_in_bounds(self.owned.len() - 1) + }; + let mut resources = Vec::new(); + iterate_bitvec_indices(&self.owned).for_each(|index| { + let resource = unsafe { self.resources.get_unchecked(index) }; + resources.push(resource.as_ref().unwrap().clone()); + }); + self.owned.clear(); + self.resources.clear(); + resources + } + /// Returns an iterator over the indices of all resources owned by `self`. pub(super) fn owned_indices(&self) -> impl Iterator + '_ { if !self.owned.is_empty() { diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index 7d85712596..b5c3861ebb 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -109,6 +109,7 @@ use crate::{ storage::Storage, }; +use parking_lot::RwLock; use std::{fmt, ops}; use thiserror::Error; @@ -353,12 +354,13 @@ impl BindGroupStates { /// and need to be owned by the render bundles. #[derive(Debug)] pub(crate) struct RenderBundleScope { - pub buffers: BufferUsageScope, - pub textures: TextureUsageScope, + pub buffers: RwLock>, + pub textures: RwLock>, // Don't need to track views and samplers, they are never used directly, only by bind groups. 
- pub bind_groups: StatelessTracker>, - pub render_pipelines: StatelessTracker>, - pub query_sets: StatelessTracker>, + pub bind_groups: RwLock>>, + pub render_pipelines: + RwLock>>, + pub query_sets: RwLock>>, } impl RenderBundleScope { @@ -370,19 +372,22 @@ impl RenderBundleScope { render_pipelines: &Storage, id::RenderPipelineId>, query_sets: &Storage, id::QuerySetId>, ) -> Self { - let mut value = Self { - buffers: BufferUsageScope::new(), - textures: TextureUsageScope::new(), - bind_groups: StatelessTracker::new(), - render_pipelines: StatelessTracker::new(), - query_sets: StatelessTracker::new(), + let value = Self { + buffers: RwLock::new(BufferUsageScope::new()), + textures: RwLock::new(TextureUsageScope::new()), + bind_groups: RwLock::new(StatelessTracker::new()), + render_pipelines: RwLock::new(StatelessTracker::new()), + query_sets: RwLock::new(StatelessTracker::new()), }; - value.buffers.set_size(buffers.len()); - value.textures.set_size(textures.len()); - value.bind_groups.set_size(bind_groups.len()); - value.render_pipelines.set_size(render_pipelines.len()); - value.query_sets.set_size(query_sets.len()); + value.buffers.write().set_size(buffers.len()); + value.textures.write().set_size(textures.len()); + value.bind_groups.write().set_size(bind_groups.len()); + value + .render_pipelines + .write() + .set_size(render_pipelines.len()); + value.query_sets.write().set_size(query_sets.len()); value } @@ -400,8 +405,12 @@ impl RenderBundleScope { &mut self, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; - unsafe { self.textures.merge_bind_group(&bind_group.textures)? }; + unsafe { self.buffers.write().merge_bind_group(&bind_group.buffers)? }; + unsafe { + self.textures + .write() + .merge_bind_group(&bind_group.textures)? + }; Ok(()) } @@ -466,8 +475,10 @@ impl UsageScope { &mut self, render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { - self.buffers.merge_usage_scope(&render_bundle.buffers)?; - self.textures.merge_usage_scope(&render_bundle.textures)?; + self.buffers + .merge_usage_scope(&*render_bundle.buffers.read())?; + self.textures + .merge_usage_scope(&*render_bundle.textures.read())?; Ok(()) } @@ -594,10 +605,11 @@ impl Tracker { render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { self.bind_groups - .add_from_tracker(&render_bundle.bind_groups); + .add_from_tracker(&*render_bundle.bind_groups.read()); self.render_pipelines - .add_from_tracker(&render_bundle.render_pipelines); - self.query_sets.add_from_tracker(&render_bundle.query_sets); + .add_from_tracker(&*render_bundle.render_pipelines.read()); + self.query_sets + .add_from_tracker(&*render_bundle.query_sets.read()); Ok(()) } diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 3ed5e73415..96353767b1 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -6,6 +6,8 @@ use std::{marker::PhantomData, sync::Arc}; +use parking_lot::Mutex; + use crate::{ hal_api::HalApi, id::TypedId, resource::Resource, storage::Storage, track::ResourceMetadata, }; @@ -13,13 +15,13 @@ use crate::{ /// Stores all the resources that a bind group stores. 
#[derive(Debug)] pub(crate) struct StatelessBindGroupSate> { - resources: Vec<(Id, Arc)>, + resources: Mutex)>>, } impl> StatelessBindGroupSate { pub fn new() -> Self { Self { - resources: Vec::new(), + resources: Mutex::new(Vec::new()), } } @@ -27,20 +29,37 @@ impl> StatelessBindGroupSate { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. - pub(crate) fn optimize(&mut self) { - self.resources.sort_unstable_by_key(|&(id, _)| id.unzip().0); + pub(crate) fn optimize(&self) { + let mut resources = self.resources.lock(); + resources.sort_unstable_by_key(|&(id, _)| id.unzip().0); } /// Returns a list of all resources tracked. May contain duplicates. - pub fn used_resources(&self) -> impl Iterator> + '_ { - self.resources.iter().map(|&(_, ref resource)| resource) + pub fn used_resources(&self) -> impl Iterator> + '_ { + let resources = self.resources.lock(); + resources + .iter() + .map(|&(_, ref resource)| resource.clone()) + .collect::>() + .into_iter() + } + + /// Returns a list of all resources tracked. May contain duplicates. + pub fn drain_resources(&self) -> impl Iterator> + '_ { + let mut resources = self.resources.lock(); + resources + .drain(..) + .map(|(_, r)| r) + .collect::>() + .into_iter() } /// Adds the given resource. - pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a T> { + pub fn add_single<'a>(&self, storage: &'a Storage, id: Id) -> Option<&'a T> { let resource = storage.get(id).ok()?; - self.resources.push((id, resource.clone())); + let mut resources = self.resources.lock(); + resources.push((id, resource.clone())); Some(resource) } @@ -50,7 +69,6 @@ impl> StatelessBindGroupSate { #[derive(Debug)] pub(crate) struct StatelessTracker> { metadata: ResourceMetadata, - _phantom: PhantomData, } @@ -58,7 +76,6 @@ impl> StatelessTracker { pub fn new() -> Self { Self { metadata: ResourceMetadata::new(), - _phantom: PhantomData, } } @@ -83,10 +100,16 @@ impl> StatelessTracker { } /// Returns a list of all resources tracked. - pub fn used_resources(&self) -> impl Iterator> + '_ { + pub fn used_resources(&self) -> impl Iterator> + '_ { self.metadata.owned_resources() } + /// Returns a list of all resources tracked. + pub fn drain_resources(&mut self) -> impl Iterator> + '_ { + let resources = self.metadata.drain_resources(); + resources.into_iter() + } + /// Inserts a single resource into the resource tracker. /// /// If the resource already exists in the tracker, it will be overwritten. diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index 146d0970cd..fe8598feee 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -34,7 +34,7 @@ use hal::TextureUses; use arrayvec::ArrayVec; use naga::FastHashMap; -use parking_lot::RwLockReadGuard; +use parking_lot::{Mutex, RwLockReadGuard}; use wgt::{strict_assert, strict_assert_eq}; use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain}; @@ -158,12 +158,12 @@ struct TextureBindGroupStateData { /// Stores all the textures that a bind group stores. #[derive(Debug)] pub(crate) struct TextureBindGroupState { - textures: Vec>, + textures: Mutex>>, } impl TextureBindGroupState { pub fn new() -> Self { Self { - textures: Vec::new(), + textures: Mutex::new(Vec::new()), } } @@ -171,24 +171,30 @@ impl TextureBindGroupState { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. 
- pub(crate) fn optimize(&mut self) { - self.textures - .sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0); + pub(crate) fn optimize(&self) { + let mut textures = self.textures.lock(); + textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0); } /// Returns a list of all textures tracked. May contain duplicates. - pub fn used_resources(&self) -> impl Iterator>> + '_ { - self.textures.iter().map(|v| &v.texture) + pub fn drain_resources(&self) -> impl Iterator>> + '_ { + let mut textures = self.textures.lock(); + textures + .drain(..) + .map(|v| v.texture) + .collect::>() + .into_iter() } /// Adds the given resource with the given state. pub fn add_single<'a>( - &mut self, + &self, texture: &'a Arc>, selector: Option, state: TextureUses, ) -> Option<&'a Arc>> { - self.textures.push(TextureBindGroupStateData { + let mut textures = self.textures.lock(); + textures.push(TextureBindGroupStateData { selector, texture: texture.clone(), usage: state, @@ -211,6 +217,11 @@ impl TextureStateSet { } } + fn clear(&mut self) { + self.simple.clear(); + self.complex.clear(); + } + fn set_size(&mut self, size: usize) { self.simple.resize(size, TextureUses::UNINITIALIZED); } @@ -220,7 +231,6 @@ impl TextureStateSet { #[derive(Debug)] pub(crate) struct TextureUsageScope { set: TextureStateSet, - metadata: ResourceMetadata>, } @@ -256,9 +266,11 @@ impl TextureUsageScope { self.metadata.set_size(size); } - /// Returns a list of all textures tracked. - pub(crate) fn used_resources(&self) -> impl Iterator>> + '_ { - self.metadata.owned_resources() + /// Drains all textures tracked. + pub(crate) fn drain_resources(&mut self) -> impl Iterator>> + '_ { + let resources = self.metadata.drain_resources(); + self.set.clear(); + resources.into_iter() } /// Returns true if the tracker owns no resources. @@ -320,7 +332,8 @@ impl TextureUsageScope { &mut self, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { - for t in &bind_group.textures { + let textures = bind_group.textures.lock(); + for t in &*textures { unsafe { self.merge_single(&t.texture, t.selector.clone(), t.usage)? }; } @@ -434,7 +447,7 @@ impl TextureTracker { } /// Returns a list of all textures tracked. 
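// A minimal, self-contained sketch (invented names, not the actual wgpu types) of the
// interior-mutability pattern this series applies to the bind-group state lists and
// usage scopes above: methods take `&self` and lock a Mutex (or RwLock) internally, and
// `drain_resources` collects into a Vec before returning because the returned iterator
// cannot keep borrowing the lock guard.
use std::sync::{Arc, Mutex};

struct TrackedList<T> {
    items: Mutex<Vec<(u32, Arc<T>)>>,
}

impl<T> TrackedList<T> {
    fn new() -> Self {
        Self { items: Mutex::new(Vec::new()) }
    }

    // `&self` instead of `&mut self`: callers only need a shared reference.
    fn add(&self, id: u32, value: Arc<T>) {
        self.items.lock().unwrap().push((id, value));
    }

    // Sort by id so a later merge touches memory in ascending order.
    fn optimize(&self) {
        self.items.lock().unwrap().sort_unstable_by_key(|&(id, _)| id);
    }

    // Collect first so the MutexGuard is dropped inside the method body.
    fn drain_resources(&self) -> impl Iterator<Item = Arc<T>> {
        let mut items = self.items.lock().unwrap();
        items.drain(..).map(|(_, r)| r).collect::<Vec<_>>().into_iter()
    }
}

fn main() {
    let list = TrackedList::new();
    list.add(2, Arc::new("b"));
    list.add(1, Arc::new("a"));
    list.optimize();
    assert_eq!(list.drain_resources().count(), 2);
}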
- pub fn used_resources(&self) -> impl Iterator>> + '_ { + pub fn used_resources(&self) -> impl Iterator>> + '_ { self.metadata.owned_resources() } @@ -638,7 +651,8 @@ impl TextureTracker { self.set_size(incoming_size); } - for t in bind_group_state.textures.iter() { + let textures = bind_group_state.textures.lock(); + for t in textures.iter() { let index = t.texture.as_info().id().unzip().0 as usize; scope.tracker_assert_in_bounds(index); From ac30622ba4fc2d8c46b0d101de876c3069deda0c Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 22 Sep 2023 08:39:04 +0200 Subject: [PATCH 107/132] Fix test --- tests/tests/mem_leaks.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index af7eb1766c..4e025cdb79 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -238,13 +238,15 @@ fn draw_test_with_reports( assert_eq!(report.textures.num_allocated, 1); let submit_index = ctx.queue.submit(Some(encoder.finish())); + ctx.device + .poll(wgpu::Maintain::WaitForSubmissionIndex(submit_index)); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); assert_eq!(report.command_buffers.num_allocated, 0); ctx.device - .poll(wgpu::Maintain::WaitForSubmissionIndex(submit_index)); + .poll(wgpu::Maintain::Wait); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); From 4e682584bc7c60d4c2a88c832a8c8cd74057ea3b Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 22 Sep 2023 09:20:08 +0200 Subject: [PATCH 108/132] Fix format --- tests/tests/mem_leaks.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 4e025cdb79..56109faf5c 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -245,8 +245,7 @@ fn draw_test_with_reports( let report = global_report.hub_report(); assert_eq!(report.command_buffers.num_allocated, 0); - ctx.device - .poll(wgpu::Maintain::Wait); + ctx.device.poll(wgpu::Maintain::Wait); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); From 0eac2052b626fc7e9abbf1685b74f7a1fd46750a Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 23 Sep 2023 02:27:49 +0200 Subject: [PATCH 109/132] Improving tracker management --- tests/tests/mem_leaks.rs | 5 +- wgpu-core/src/binding_model.rs | 8 +- wgpu-core/src/command/bundle.rs | 4 +- wgpu-core/src/command/mod.rs | 4 +- wgpu-core/src/device/global.rs | 13 - wgpu-core/src/device/life.rs | 943 +++++++++++++++---------------- wgpu-core/src/device/queue.rs | 28 +- wgpu-core/src/device/resource.rs | 46 +- wgpu-core/src/hal_api.rs | 4 +- wgpu-core/src/id.rs | 61 +- wgpu-core/src/instance.rs | 6 +- wgpu-core/src/pipeline.rs | 8 +- wgpu-core/src/resource.rs | 19 +- wgpu-core/src/track/buffer.rs | 111 ++-- wgpu-core/src/track/mod.rs | 8 + wgpu-core/src/track/stateless.rs | 86 +-- wgpu-core/src/track/texture.rs | 87 +-- wgpu/src/backend/direct.rs | 10 +- 18 files changed, 732 insertions(+), 719 deletions(-) diff --git a/tests/tests/mem_leaks.rs b/tests/tests/mem_leaks.rs index 56109faf5c..af7eb1766c 100644 --- a/tests/tests/mem_leaks.rs +++ b/tests/tests/mem_leaks.rs @@ -238,14 +238,13 @@ fn draw_test_with_reports( assert_eq!(report.textures.num_allocated, 1); let submit_index = ctx.queue.submit(Some(encoder.finish())); - ctx.device - .poll(wgpu::Maintain::WaitForSubmissionIndex(submit_index)); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); 
assert_eq!(report.command_buffers.num_allocated, 0); - ctx.device.poll(wgpu::Maintain::Wait); + ctx.device + .poll(wgpu::Maintain::WaitForSubmissionIndex(submit_index)); let global_report = ctx.instance.generate_report(); let report = global_report.hub_report(); diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index e4d389703e..2a4e3084a7 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -7,7 +7,7 @@ use crate::{ TextureViewId, }, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, - resource::{Resource, ResourceInfo}, + resource::{Resource, ResourceInfo, ResourceType}, track::{BindGroupStates, UsageConflict}, validation::{MissingBufferUsageError, MissingTextureUsageError}, FastHashMap, Label, @@ -481,7 +481,7 @@ impl Drop for BindGroupLayout { } impl Resource for BindGroupLayout { - const TYPE: &'static str = "BindGroupLayout"; + const TYPE: ResourceType = "BindGroupLayout"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -730,7 +730,7 @@ impl PipelineLayout { } impl Resource for PipelineLayout { - const TYPE: &'static str = "PipelineLayout"; + const TYPE: ResourceType = "PipelineLayout"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -921,7 +921,7 @@ impl BindGroup { } impl Resource for BindGroup { - const TYPE: &'static str = "BindGroup"; + const TYPE: ResourceType = "BindGroup"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index 4bc47c7414..e476a5ecf8 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -95,7 +95,7 @@ use crate::{ id::{self, RenderBundleId}, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{self, PipelineFlags, RenderPipeline}, - resource::{Resource, ResourceInfo}, + resource::{Resource, ResourceInfo, ResourceType}, track::RenderBundleScope, validation::check_buffer_usage, Label, LabelHelpers, @@ -952,7 +952,7 @@ impl RenderBundle { } impl Resource for RenderBundle { - const TYPE: &'static str = "RenderBundle"; + const TYPE: ResourceType = "RenderBundle"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 672e81bb90..0f40478d6e 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -24,7 +24,7 @@ use crate::hub::Hub; use crate::id::CommandBufferId; use crate::init_tracker::BufferInitTrackerAction; -use crate::resource::{Resource, ResourceInfo}; +use crate::resource::{Resource, ResourceInfo, ResourceType}; use crate::track::{Tracker, UsageScope}; use crate::{global::Global, hal_api::HalApi, id, identity::GlobalIdentityHandlerFactory, Label}; @@ -278,7 +278,7 @@ impl CommandBuffer { } impl Resource for CommandBuffer { - const TYPE: &'static str = "CommandBuffer"; + const TYPE: ResourceType = "CommandBuffer"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 8a469a40be..8c90b141a7 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -486,7 +486,6 @@ impl Global { device .lock_life() .suspected_resources - .buffers .insert(buffer_id, buffer); } @@ -731,7 +730,6 @@ impl Global { } else { life_lock .suspected_resources - .textures .insert(texture_id, texture.clone()); } } @@ -807,7 +805,6 @@ impl Global { view.device .lock_life() .suspected_resources - .texture_views .insert(texture_view_id, view.clone()); if wait { @@ -876,7 
+873,6 @@ impl Global { .device .lock_life() .suspected_resources - .samplers .insert(sampler_id, sampler.clone()); } } @@ -965,7 +961,6 @@ impl Global { .device .lock_life() .suspected_resources - .bind_group_layouts .insert(bind_group_layout_id, layout.clone()); } } @@ -1028,7 +1023,6 @@ impl Global { .device .lock_life() .suspected_resources - .pipeline_layouts .insert(pipeline_layout_id, layout.clone()); } } @@ -1099,7 +1093,6 @@ impl Global { .device .lock_life() .suspected_resources - .bind_groups .insert(bind_group_id, bind_group.clone()); } } @@ -1376,7 +1369,6 @@ impl Global { .device .lock_life() .suspected_resources - .render_bundles .insert(render_bundle_id, bundle.clone()); } } @@ -1442,7 +1434,6 @@ impl Global { device .lock_life() .suspected_resources - .query_sets .insert(query_set_id, query_set.clone()); } } @@ -1558,12 +1549,10 @@ impl Global { let mut life_lock = device.lock_life(); life_lock .suspected_resources - .render_pipelines .insert(render_pipeline_id, pipeline.clone()); life_lock .suspected_resources - .pipeline_layouts .insert(layout_id, pipeline.layout.clone()); } } @@ -1676,11 +1665,9 @@ impl Global { let mut life_lock = device.lock_life(); life_lock .suspected_resources - .compute_pipelines .insert(compute_pipeline_id, pipeline.clone()); life_lock .suspected_resources - .pipeline_layouts .insert(layout_id, pipeline.layout.clone()); } } diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 12f56d1ceb..bbcb0eb643 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -9,79 +9,135 @@ use crate::{ }, hal_api::HalApi, hub::Hub, - id::{self}, + id::{ + self, BindGroupId, BindGroupLayoutId, BufferId, ComputePipelineId, PipelineLayoutId, + QuerySetId, RenderBundleId, RenderPipelineId, SamplerId, StagingBufferId, TextureId, + TextureViewId, + }, pipeline::{ComputePipeline, RenderPipeline}, - resource::{self, Buffer, QuerySet, Resource, Sampler, StagingBuffer, Texture, TextureView}, - track::Tracker, - SubmissionIndex, + registry::Registry, + resource::{ + self, Buffer, QuerySet, Resource, ResourceType, Sampler, StagingBuffer, Texture, + TextureView, + }, + track::{ResourceTracker, Tracker}, + FastHashMap, SubmissionIndex, }; use smallvec::SmallVec; use parking_lot::Mutex; use thiserror::Error; +use wgt::{WasmNotSend, WasmNotSync}; -use std::{collections::HashMap, sync::Arc}; +use std::{any::Any, collections::HashMap, sync::Arc}; -/// A struct that keeps lists of resources that are no longer needed by the user. 
-pub(crate) struct ResourceMaps { - pub(crate) buffers: HashMap>>, - pub(crate) staging_buffers: HashMap>>, - pub(crate) textures: HashMap>>, - pub(crate) texture_views: HashMap>>, - pub(crate) samplers: HashMap>>, - pub(crate) bind_groups: HashMap>>, - pub(crate) compute_pipelines: HashMap>>, - pub(crate) render_pipelines: HashMap>>, - pub(crate) bind_group_layouts: HashMap>>, - pub(crate) pipeline_layouts: HashMap>>, - pub(crate) render_bundles: HashMap>>, - pub(crate) query_sets: HashMap>>, +pub(crate) trait ResourceMap: Any + WasmNotSend + WasmNotSync { + fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; + fn clear_map(&mut self); + fn extend_map(&mut self, maps: &mut ResourceMaps); } -impl ResourceMaps { - pub(crate) fn new() -> Self { - Self { - buffers: HashMap::new(), - staging_buffers: HashMap::new(), - textures: HashMap::new(), - texture_views: HashMap::new(), - samplers: HashMap::new(), - bind_groups: HashMap::new(), - compute_pipelines: HashMap::new(), - render_pipelines: HashMap::new(), - bind_group_layouts: HashMap::new(), - pipeline_layouts: HashMap::new(), - render_bundles: HashMap::new(), - query_sets: HashMap::new(), +impl ResourceMap for HashMap> +where + Id: id::TypedId, + R: Resource, +{ + fn as_any(&self) -> &dyn Any { + self + } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } + fn clear_map(&mut self) { + self.clear() + } + fn extend_map(&mut self, r: &mut ResourceMaps) { + if let Some(other) = r.maps.get_mut(R::TYPE) { + if let Some(other) = other.as_any_mut().downcast_mut::() { + self.extend(other.drain()); + } } } +} + +/// A struct that keeps lists of resources that are no longer needed by the user. +#[derive(Default)] +pub(crate) struct ResourceMaps { + pub(crate) maps: FastHashMap>, +} + +impl ResourceMaps { + fn add_type(&mut self) -> &mut Self + where + Id: id::TypedId, + R: Resource, + { + let map = HashMap::>::default(); + self.maps.insert(R::TYPE, Box::new(map)); + self + } + fn map(&self) -> &HashMap> + where + Id: id::TypedId, + R: Resource, + { + let map = self.maps.get(R::TYPE).unwrap(); + let any_map = map.as_ref().as_any(); + let map = any_map.downcast_ref::>>().unwrap(); + map + } + fn map_mut(&mut self) -> &mut HashMap> + where + Id: id::TypedId, + R: Resource, + { + let map = self + .maps + .entry(R::TYPE) + .or_insert_with(|| Box::>>::new(HashMap::default())); + let any_map = map.as_mut().as_any_mut(); + let map = any_map.downcast_mut::>>().unwrap(); + map + } + pub(crate) fn new() -> Self { + let mut maps = Self::default(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps + } pub(crate) fn clear(&mut self) { - self.render_bundles.clear(); - self.bind_groups.clear(); - self.compute_pipelines.clear(); - self.render_pipelines.clear(); - self.bind_group_layouts.clear(); - self.pipeline_layouts.clear(); - self.texture_views.clear(); - self.samplers.clear(); - self.staging_buffers.clear(); - self.query_sets.clear(); - self.textures.clear(); - self.buffers.clear(); - } - - pub(crate) fn extend(&mut self, other: Self) { - self.buffers.extend(other.buffers); - self.staging_buffers.extend(other.staging_buffers); - self.textures.extend(other.textures); - self.texture_views.extend(other.texture_views); - self.samplers.extend(other.samplers); - self.bind_groups.extend(other.bind_groups); - 
self.compute_pipelines.extend(other.compute_pipelines); - self.render_pipelines.extend(other.render_pipelines); - self.bind_group_layouts.extend(other.bind_group_layouts); - self.pipeline_layouts.extend(other.pipeline_layouts); - self.query_sets.extend(other.query_sets); + self.maps.iter_mut().for_each(|(_t, map)| map.clear_map()); + } + pub(crate) fn extend(&mut self, mut other: Self) { + self.maps.iter_mut().for_each(|(_t, map)| { + map.extend_map(&mut other); + }); + } + pub(crate) fn insert(&mut self, id: Id, r: Arc) -> &mut Self + where + Id: id::TypedId, + R: Resource, + { + self.map_mut().insert(id, r); + self + } + pub(crate) fn contains(&mut self, id: &Id) -> bool + where + Id: id::TypedId, + R: Resource, + { + self.map::().contains_key(id) } } @@ -103,7 +159,7 @@ struct ActiveSubmission { /// This includes things like temporary resources and resources that are /// used by submitted commands but have been dropped by the user (meaning that /// this submission is their last reference.) - last_resources: ResourceMaps, + last_resources: ResourceMaps, /// Buffers to be mapped once this submission has completed. mapped: Vec>>, @@ -177,7 +233,7 @@ pub(crate) struct LifetimeTracker { /// Resources whose user handle has died (i.e. drop/destroy has been called) /// and will likely be ready for destruction soon. - pub suspected_resources: ResourceMaps, + pub suspected_resources: ResourceMaps, /// Resources used by queue submissions still in flight. One entry per /// submission, with older submissions appearing before younger. @@ -192,7 +248,7 @@ pub(crate) struct LifetimeTracker { /// These are freed by `LifeTracker::cleanup`, which is called from periodic /// maintenance functions like `Global::device_poll`, and when a device is /// destroyed. - free_resources: ResourceMaps, + free_resources: ResourceMaps, /// Buffers the user has asked us to map, and which are not used by any /// queue submission still in flight. 
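// A simplified model (assumed names, not the wgpu API) of the type-erased ResourceMaps
// introduced above: one inner HashMap per resource kind, stored as `Box<dyn Any>` under a
// `&'static str` type tag and downcast back to its concrete type on every access.
use std::any::Any;
use std::collections::HashMap;

type TypeTag = &'static str;

#[derive(Default)]
struct ErasedMaps {
    maps: HashMap<TypeTag, Box<dyn Any>>,
}

impl ErasedMaps {
    // Fetch (or lazily create) the concrete HashMap<u32, V> stored under `tag`.
    fn map_mut<V: Any>(&mut self, tag: TypeTag) -> &mut HashMap<u32, V> {
        self.maps
            .entry(tag)
            .or_insert_with(|| Box::new(HashMap::<u32, V>::new()) as Box<dyn Any>)
            .downcast_mut::<HashMap<u32, V>>()
            .expect("tag registered with a different value type")
    }

    fn insert<V: Any>(&mut self, tag: TypeTag, id: u32, value: V) {
        self.map_mut::<V>(tag).insert(id, value);
    }

    fn contains<V: Any>(&mut self, tag: TypeTag, id: u32) -> bool {
        self.map_mut::<V>(tag).contains_key(&id)
    }
}

fn main() {
    let mut maps = ErasedMaps::default();
    maps.insert("Buffer", 7, String::from("buffer #7"));
    maps.insert("Texture", 3, 1024u64);
    assert!(maps.contains::<String>("Buffer", 7));
    assert!(!maps.contains::<u64>("Texture", 9));
}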
@@ -211,9 +267,9 @@ impl LifetimeTracker { mapped: Vec::new(), future_suspected_buffers: Vec::new(), future_suspected_textures: Vec::new(), - suspected_resources: ResourceMaps::new(), + suspected_resources: ResourceMaps::new::(), active: Vec::new(), - free_resources: ResourceMaps::new(), + free_resources: ResourceMaps::new::(), ready_to_map: Vec::new(), work_done_closures: SmallVec::new(), } @@ -231,19 +287,17 @@ impl LifetimeTracker { temp_resources: impl Iterator>, encoders: Vec>, ) { - let mut last_resources = ResourceMaps::new(); + let mut last_resources = ResourceMaps::new::(); for res in temp_resources { match res { TempResource::Buffer(raw) => { - last_resources.buffers.insert(raw.as_info().id(), raw); + last_resources.insert(raw.as_info().id(), raw); } TempResource::StagingBuffer(raw) => { - last_resources - .staging_buffers - .insert(raw.as_info().id(), raw); + last_resources.insert(raw.as_info().id(), raw); } TempResource::Texture(raw) => { - last_resources.textures.insert(raw.as_info().id(), raw); + last_resources.insert(raw.as_info().id(), raw); } } } @@ -259,12 +313,10 @@ impl LifetimeTracker { pub fn post_submit(&mut self) { for v in self.future_suspected_buffers.drain(..).take(1) { - self.suspected_resources.buffers.insert(v.as_info().id(), v); + self.suspected_resources.insert(v.as_info().id(), v); } for v in self.future_suspected_textures.drain(..).take(1) { - self.suspected_resources - .textures - .insert(v.as_info().id(), v); + self.suspected_resources.insert(v.as_info().id(), v); } } @@ -340,13 +392,13 @@ impl LifetimeTracker { .map_or(&mut self.free_resources, |a| &mut a.last_resources); match temp_resource { TempResource::Buffer(raw) => { - resources.buffers.insert(raw.as_info().id(), raw); + resources.insert(raw.as_info().id(), raw); } TempResource::StagingBuffer(raw) => { - resources.staging_buffers.insert(raw.as_info().id(), raw); + resources.insert(raw.as_info().id(), raw); } TempResource::Texture(raw) => { - resources.textures.insert(raw.as_info().id(), raw); + resources.insert(raw.as_info().id(), raw); } } } @@ -366,392 +418,339 @@ impl LifetimeTracker { } impl LifetimeTracker { - fn triage_suspected_render_bundles( - &mut self, - hub: &Hub, - trackers: &Mutex>, + fn triage_resources( + resources_map: &mut HashMap>, + active: &mut [ActiveSubmission], + free_resources: &mut ResourceMaps, + trackers: &mut impl ResourceTracker, + registry: &Registry, mut f: F, - ) -> &mut Self + ) -> Vec> where - F: FnMut(&id::RenderBundleId), + Id: id::TypedId, + R: Resource, + F: FnMut(&Id, &Arc), { - self.suspected_resources - .render_bundles - .retain(|&bundle_id, bundle| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .bundles - .remove_abandoned(bundle_id, hub.render_bundles.contains(bundle_id)); + let mut removed_resources = Vec::new(); + resources_map.retain(|&id, resource| { + let submit_index = resource.as_info().submission_index(); + let non_referenced_resources = active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut *free_resources, |a| &mut a.last_resources); - f(&bundle_id); + let mut count = 1; + count += registry.contains(id) as usize; + count += non_referenced_resources.contains::(&id) as usize; + let is_removed = trackers.remove_abandoned(id, count); + if is_removed { + f(&id, resource); + removed_resources.push(resource.clone()); + non_referenced_resources.insert(id, resource.clone()); + } + !is_removed + }); + removed_resources + } - for v in bundle.used.buffers.write().drain_resources() { - self.suspected_resources - 
.buffers - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.textures.write().drain_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.bind_groups.write().drain_resources() { - self.suspected_resources - .bind_groups - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.render_pipelines.write().drain_resources() { - self.suspected_resources - .render_pipelines - .insert(v.as_info().id(), v.clone()); - } - for v in bundle.used.query_sets.write().drain_resources() { - self.suspected_resources - .query_sets - .insert(v.as_info().id(), v.clone()); + fn triage_suspected_render_bundles( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.bundles, + &hub.render_bundles, + |_bundle_id, _bundle| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyRenderBundle(*_bundle_id)); } - !is_removed - }); + }, + ); + removed_resources.drain(..).for_each(|bundle| { + for v in bundle.used.buffers.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.textures.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.bind_groups.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.render_pipelines.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v.clone()); + } + for v in bundle.used.query_sets.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v.clone()); + } + }); self } - fn triage_suspected_bind_groups( + fn triage_suspected_bind_groups( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::BindGroupId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources - .bind_groups - .retain(|&bind_group_id, bind_group| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .bind_groups - .remove_abandoned(bind_group_id, hub.bind_groups.contains(bind_group_id)); - - f(&bind_group_id); - - for v in bind_group.used.buffers.drain_resources() { - self.suspected_resources - .buffers - .insert(v.as_info().id(), v.clone()); - } - for v in bind_group.used.textures.drain_resources() { - self.suspected_resources - .textures - .insert(v.as_info().id(), v.clone()); - } - for v in bind_group.used.views.drain_resources() { - self.suspected_resources - .texture_views - .insert(v.as_info().id(), v.clone()); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resource = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.bind_groups, + &hub.bind_groups, + |_bind_group_id, _bind_group| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBindGroup(*_bind_group_id)); } - for v in bind_group.used.samplers.drain_resources() { - self.suspected_resources - .samplers - .insert(v.as_info().id(), v.clone()); - } - - 
self.suspected_resources - .bind_group_layouts - .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + }, + ); + removed_resource.drain(..).for_each(|bind_group| { + for v in bind_group.used.buffers.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.textures.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.views.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.samplers.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } - let submit_index = bind_group.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .bind_groups - .insert(bind_group_id, bind_group.clone()); - !is_removed - }); - submit_indices + self.suspected_resources + .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + }); + self } - fn triage_suspected_texture_views( + fn triage_suspected_texture_views( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::TextureViewId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources - .texture_views - .retain(|&view_id, view| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .views - .remove_abandoned(view_id, hub.texture_views.contains(view_id)); - - f(&view_id); - - { - let mut lock = view.parent.write(); - if let Some(parent_texture) = lock.take() { - self.suspected_resources - .textures - .insert(parent_texture.as_info().id(), parent_texture); - } + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.views, + &hub.texture_views, + |_texture_view_id, _texture_view| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyTextureView(*_texture_view_id)); } - let submit_index = view.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .texture_views - .insert(view_id, view.clone()); - !is_removed - }); - submit_indices + }, + ); + removed_resources.drain(..).for_each(|texture_view| { + let mut lock = texture_view.parent.write(); + if let Some(parent_texture) = lock.take() { + self.suspected_resources + .insert(parent_texture.as_info().id(), parent_texture); + } + }); + self } - fn triage_suspected_textures( + fn triage_suspected_textures( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> &mut Self - where - F: FnMut(&id::TextureId), - { - self.suspected_resources - .textures - .retain(|&texture_id, texture| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .textures - .remove_abandoned(texture_id, hub.textures.contains(texture_id)); - - f(&texture_id); - - let submit_index = texture.info.submission_index(); - let non_referenced_resources = self - .active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources); - 
non_referenced_resources - .textures - .insert(texture_id, texture.clone()); - !is_removed - }); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.textures, + &hub.textures, + |_texture_id, _texture| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyTexture(*_texture_id)); + } + }, + ); self } - fn triage_suspected_samplers( + fn triage_suspected_samplers( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::SamplerId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources - .samplers - .retain(|&sampler_id, sampler| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .samplers - .remove_abandoned(sampler_id, hub.samplers.contains(sampler_id)); - - f(&sampler_id); - - let submit_index = sampler.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.samplers, + &hub.samplers, + |_sampler_id, _sampler| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroySampler(*_sampler_id)); } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .samplers - .insert(sampler_id, sampler.clone()); - !is_removed - }); - submit_indices + }, + ); + self } - fn triage_suspected_buffers( + fn triage_suspected_buffers( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::BufferId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources - .buffers - .retain(|&buffer_id, buffer| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .buffers - .remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)); - - f(&buffer_id); - - let submit_index = buffer.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - if let resource::BufferMapState::Init { - ref stage_buffer, .. - } = *buffer.map_state.lock() - { - self.free_resources - .buffers - .insert(stage_buffer.as_info().id(), stage_buffer.clone()); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.buffers, + &hub.buffers, + |_buffer_id, _buffer| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBuffer(*_buffer_id)); } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .buffers - .insert(buffer_id, buffer.clone()); - !is_removed - }); - submit_indices + }, + ); + removed_resources.drain(..).for_each(|buffer| { + if let resource::BufferMapState::Init { + ref stage_buffer, .. 
+ } = *buffer.map_state.lock() + { + self.free_resources + .insert(stage_buffer.as_info().id(), stage_buffer.clone()); + } + }); + self } - fn triage_suspected_compute_pipelines( + fn triage_suspected_compute_pipelines( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::ComputePipelineId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources.compute_pipelines.retain( - |&compute_pipeline_id, compute_pipeline| { - let mut trackers = trackers.lock(); - let is_removed = trackers.compute_pipelines.remove_abandoned( - compute_pipeline_id, - hub.compute_pipelines.contains(compute_pipeline_id), - ); - - f(&compute_pipeline_id); - - self.suspected_resources.pipeline_layouts.insert( - compute_pipeline.layout.as_info().id(), - compute_pipeline.layout.clone(), - ); - - let submit_index = compute_pipeline.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.compute_pipelines, + &hub.compute_pipelines, + |_compute_pipeline_id, _compute_pipeline| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyComputePipeline(*_compute_pipeline_id)); } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .compute_pipelines - .insert(compute_pipeline_id, compute_pipeline.clone()); - !is_removed }, ); - submit_indices + removed_resources.drain(..).for_each(|compute_pipeline| { + self.suspected_resources.insert( + compute_pipeline.layout.as_info().id(), + compute_pipeline.layout.clone(), + ); + }); + self } - fn triage_suspected_render_pipelines( + fn triage_suspected_render_pipelines( &mut self, hub: &Hub, trackers: &Mutex>, - mut f: F, - ) -> Vec - where - F: FnMut(&id::RenderPipelineId), - { - let mut submit_indices = Vec::new(); - self.suspected_resources - .render_pipelines - .retain(|&render_pipeline_id, render_pipeline| { - let mut trackers = trackers.lock(); - let is_removed = trackers.render_pipelines.remove_abandoned( - render_pipeline_id, - hub.render_pipelines.contains(render_pipeline_id), - ); - - f(&render_pipeline_id); - - self.suspected_resources.pipeline_layouts.insert( - render_pipeline.layout.as_info().id(), - render_pipeline.layout.clone(), - ); - - let submit_index = render_pipeline.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.render_pipelines, + &hub.render_pipelines, + |_render_pipeline_id, _render_pipeline| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyRenderPipeline(*_render_pipeline_id)); } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .render_pipelines - .insert(render_pipeline_id, render_pipeline.clone()); - !is_removed - }); - 
submit_indices + }, + ); + removed_resources.drain(..).for_each(|render_pipeline| { + self.suspected_resources.insert( + render_pipeline.layout.as_info().id(), + render_pipeline.layout.clone(), + ); + }); + self } - fn triage_suspected_pipeline_layouts(&mut self, mut f: F) -> &mut Self - where - F: FnMut(&id::PipelineLayoutId), - { + fn triage_suspected_pipeline_layouts( + &mut self, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut removed_resources = Vec::new(); self.suspected_resources - .pipeline_layouts - .retain(|pipeline_layout_id, pipeline_layout| { - //Note: this has to happen after all the suspected pipelines are destroyed - f(pipeline_layout_id); - - for bgl in &pipeline_layout.bind_group_layouts { - self.suspected_resources - .bind_group_layouts - .insert(bgl.as_info().id(), bgl.clone()); + .map_mut::>() + .retain(|_pipeline_layout_id, pipeline_layout| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyPipelineLayout(*_pipeline_layout_id)); } - self.free_resources - .pipeline_layouts - .insert(*pipeline_layout_id, pipeline_layout.clone()); + removed_resources.push(pipeline_layout.clone()); false }); + removed_resources.drain(..).for_each(|pipeline_layout| { + for bgl in &pipeline_layout.bind_group_layouts { + self.suspected_resources + .insert(bgl.as_info().id(), bgl.clone()); + } + }); self } - fn triage_suspected_bind_group_layouts(&mut self, mut f: F) -> &mut Self - where - F: FnMut(&id::BindGroupLayoutId), - { - self.suspected_resources.bind_group_layouts.retain( - |bind_group_layout_id, bind_group_layout| { + fn triage_suspected_bind_group_layouts( + &mut self, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + self.suspected_resources + .map_mut::>() + .retain(|bind_group_layout_id, bind_group_layout| { //Note: this has to happen after all the suspected pipelines are destroyed //Note: nothing else can bump the refcount since the guard is locked exclusively //Note: same BGL can appear multiple times in the list, but only the last // encounter could drop the refcount to 0. 
- f(bind_group_layout_id); + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBindGroupLayout(*bind_group_layout_id)); + } self.free_resources - .bind_group_layouts .insert(*bind_group_layout_id, bind_group_layout.clone()); false - }, - ); + }); self } @@ -759,31 +758,29 @@ impl LifetimeTracker { &mut self, hub: &Hub, trackers: &Mutex>, - ) -> Vec { - let mut submit_indices = Vec::new(); + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.query_sets, + &hub.query_sets, + |_query_set_id, _query_set| {}, + ); + self + } + + fn triage_suspected_staging_buffers(&mut self) -> &mut Self { self.suspected_resources - .query_sets - .retain(|&query_set_id, query_set| { - let mut trackers = trackers.lock(); - let is_removed = trackers - .query_sets - .remove_abandoned(query_set_id, hub.query_sets.contains(query_set_id)); - // #[cfg(feature = "trace")] - // trace.map(|t| t.add(trace::Action::DestroyComputePipeline(id))); - - let submit_index = query_set.info.submission_index(); - if !submit_indices.contains(&submit_index) { - submit_indices.push(submit_index); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .query_sets - .insert(query_set_id, query_set.clone()); - !is_removed + .map_mut::>() + .retain(|staging_buffer_id, staging_buffer| { + self.free_resources + .insert(*staging_buffer_id, staging_buffer.clone()); + false }); - submit_indices + self } /// Identify resources to free, according to `trackers` and `self.suspected_resources`. @@ -833,67 +830,65 @@ impl LifetimeTracker { ) { profiling::scope!("triage_suspected"); - self.triage_suspected_render_bundles(hub, trackers, |_id| { + //NOTE: the order is important to release resources that depends between each other! 
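// An illustrative reduction (invented names, not the wgpu code) of the shared shape the
// new generic `triage_resources` gives every `triage_suspected_*` method above: walk the
// suspected map, check whether anyone besides the holders we already account for still
// references the resource, and if not hand it to the free-when-the-submission-retires
// bucket (the real code picks that bucket by the resource's submission index; a single
// bucket stands in for it here).
use std::collections::HashMap;
use std::sync::Arc;

struct Suspect;

fn triage(
    suspected: &mut HashMap<u32, Arc<Suspect>>,
    free_list: &mut Vec<(u32, Arc<Suspect>)>,
    still_in_registry: impl Fn(u32) -> bool,
) -> Vec<Arc<Suspect>> {
    let mut removed = Vec::new();
    suspected.retain(|&id, res| {
        // Holders we can account for: this suspected map itself, plus the public
        // registry while the user-facing id is still alive.
        let known_holders = 1 + still_in_registry(id) as usize;
        let abandoned = Arc::strong_count(res) <= known_holders;
        if abandoned {
            removed.push(res.clone());
            free_list.push((id, res.clone()));
        }
        !abandoned
    });
    // The caller walks `removed` to cascade: e.g. a removed bind group re-suspects the
    // buffers, views and samplers it was holding.
    removed
}

fn main() {
    let mut suspected = HashMap::from([(1, Arc::new(Suspect))]);
    let mut free_list = Vec::new();
    let removed = triage(&mut suspected, &mut free_list, |_| false);
    assert_eq!(removed.len(), 1);
    assert!(suspected.is_empty());
}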
+ self.triage_suspected_render_bundles( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderBundle(*_id)); - } - }); - self.triage_suspected_compute_pipelines(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_compute_pipelines( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyComputePipeline(*_id)); - } - }); - self.triage_suspected_render_pipelines(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_render_pipelines( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyRenderPipeline(*_id)); - } - }); - self.triage_suspected_bind_groups(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_bind_groups( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroup(*_id)); - } - }); - self.triage_suspected_pipeline_layouts(|_id| { + &mut trace, + ); + self.triage_suspected_pipeline_layouts( #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyPipelineLayout(*_id)); - } - }); - self.triage_suspected_bind_group_layouts(|_id| { + &mut trace, + ); + self.triage_suspected_bind_group_layouts( #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBindGroupLayout(*_id)); - } - }); + &mut trace, + ); self.triage_suspected_query_sets(hub, trackers); - self.triage_suspected_samplers(hub, trackers, |_id| { + self.triage_suspected_samplers( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroySampler(*_id)); - } - }); - self.triage_suspected_texture_views(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_staging_buffers(); + self.triage_suspected_texture_views( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTextureView(*_id)); - } - }); - self.triage_suspected_textures(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_textures( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyTexture(*_id)); - } - }); - self.triage_suspected_buffers(hub, trackers, |_id| { + &mut trace, + ); + self.triage_suspected_buffers( + hub, + trackers, #[cfg(feature = "trace")] - if let Some(ref mut t) = trace { - t.add(trace::Action::DestroyBuffer(*_id)); - } - }); + &mut trace, + ); } /// Determine which buffers are ready to map, and which must wait for the @@ -944,16 +939,14 @@ impl LifetimeTracker { let buffer_id = buffer.info.id(); let is_removed = { let mut trackers = trackers.lock(); - trackers - .buffers - .remove_abandoned(buffer_id, hub.buffers.contains(buffer_id)) + let mut count = 1; + count += hub.buffers.contains(buffer_id) as usize; + trackers.buffers.remove_abandoned(buffer_id, count) }; if is_removed { *buffer.map_state.lock() = resource::BufferMapState::Idle; - log::info!("Buffer {:?} is not tracked anymore", buffer_id); - self.free_resources - .buffers - .insert(buffer_id, buffer.clone()); + log::info!("Buffer ready to map {:?} is not tracked anymore", buffer_id); + self.free_resources.insert(buffer_id, buffer.clone()); } else { let mapping = match std::mem::replace( &mut *buffer.map_state.lock(), diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 2247933acc..0d60974dac 100644 --- a/wgpu-core/src/device/queue.rs +++ 
b/wgpu-core/src/device/queue.rs @@ -14,8 +14,8 @@ use crate::{ identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, resource::{ - Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, StagingBuffer, Texture, - TextureInner, + Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, ResourceType, + StagingBuffer, Texture, TextureInner, }, track, FastHashMap, SubmissionIndex, }; @@ -38,7 +38,7 @@ pub struct Queue { } impl Resource for Queue { - const TYPE: &'static str = "Queue"; + const TYPE: ResourceType = "Queue"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -1136,7 +1136,7 @@ impl Global { { let mut suspected = temp_suspected.take().unwrap(); suspected.clear(); - temp_suspected.replace(ResourceMaps::new()); + temp_suspected.replace(ResourceMaps::new::()); } // finish all the command buffers first @@ -1201,11 +1201,7 @@ impl Global { unsafe { device.raw().unmap_buffer(raw_buf) } .map_err(DeviceError::from)?; } - temp_suspected - .as_mut() - .unwrap() - .buffers - .insert(id, buffer.clone()); + temp_suspected.as_mut().unwrap().insert(id, buffer.clone()); } else { match *buffer.map_state.lock() { BufferMapState::Idle => (), @@ -1227,11 +1223,7 @@ impl Global { }; texture.info.use_at(submit_index); if texture.is_unique() { - temp_suspected - .as_mut() - .unwrap() - .textures - .insert(id, texture.clone()); + temp_suspected.as_mut().unwrap().insert(id, texture.clone()); } if should_extend { unsafe { @@ -1247,7 +1239,6 @@ impl Global { temp_suspected .as_mut() .unwrap() - .texture_views .insert(texture_view.as_info().id(), texture_view.clone()); } } @@ -1267,7 +1258,6 @@ impl Global { temp_suspected .as_mut() .unwrap() - .bind_groups .insert(bg.as_info().id(), bg.clone()); } } @@ -1278,7 +1268,7 @@ impl Global { { compute_pipeline.info.use_at(submit_index); if compute_pipeline.is_unique() { - temp_suspected.as_mut().unwrap().compute_pipelines.insert( + temp_suspected.as_mut().unwrap().insert( compute_pipeline.as_info().id(), compute_pipeline.clone(), ); @@ -1289,7 +1279,7 @@ impl Global { { render_pipeline.info.use_at(submit_index); if render_pipeline.is_unique() { - temp_suspected.as_mut().unwrap().render_pipelines.insert( + temp_suspected.as_mut().unwrap().insert( render_pipeline.as_info().id(), render_pipeline.clone(), ); @@ -1301,7 +1291,6 @@ impl Global { temp_suspected .as_mut() .unwrap() - .query_sets .insert(query_set.as_info().id(), query_set.clone()); } } @@ -1322,7 +1311,6 @@ impl Global { temp_suspected .as_mut() .unwrap() - .render_bundles .insert(bundle.as_info().id(), bundle.clone()); } } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 67a258b0b8..50a25a203c 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -24,7 +24,7 @@ use crate::{ registry::Registry, resource::ResourceInfo, resource::{ - self, Buffer, QuerySet, Resource, Sampler, Texture, TextureView, + self, Buffer, QuerySet, Resource, ResourceType, Sampler, Texture, TextureView, TextureViewNotRenderableReason, }, storage::Storage, @@ -105,7 +105,7 @@ pub struct Device { life_tracker: Mutex>, /// Temporary storage for resource management functions. Cleared at the end /// of every call (unless an error occurs). 
- pub(crate) temp_suspected: Mutex>>, + pub(crate) temp_suspected: Mutex>, pub(crate) alignments: hal::Alignments, pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, @@ -241,7 +241,7 @@ impl Device { fence: RwLock::new(Some(fence)), trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), - temp_suspected: Mutex::new(Some(life::ResourceMaps::new())), + temp_suspected: Mutex::new(Some(life::ResourceMaps::new::())), #[cfg(feature = "trace")] trace: Mutex::new(trace_path.and_then(|path| match trace::Trace::new(path) { Ok(mut trace) => { @@ -304,7 +304,7 @@ impl Device { let temp_suspected = self.temp_suspected.lock().take().unwrap(); life_tracker.suspected_resources.extend(temp_suspected); } - self.temp_suspected.lock().replace(ResourceMaps::new()); + self.temp_suspected.lock().replace(ResourceMaps::new::()); life_tracker.triage_suspected( hub, @@ -348,6 +348,8 @@ impl Device { self.command_allocator.lock().as_mut().unwrap(), ); let mapping_closures = life_tracker.handle_mapping(hub, self.raw(), &self.trackers); + + //Cleaning up resources and released all unused suspected ones life_tracker.cleanup(); let closures = UserClosures { @@ -365,63 +367,47 @@ impl Device { { for resource in trackers.buffers.used_resources() { if resource.is_unique() { - temp_suspected - .buffers - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.textures.used_resources() { if resource.is_unique() { - temp_suspected - .textures - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.views.used_resources() { if resource.is_unique() { - temp_suspected - .texture_views - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.bind_groups.used_resources() { if resource.is_unique() { - temp_suspected - .bind_groups - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.samplers.used_resources() { if resource.is_unique() { - temp_suspected - .samplers - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.compute_pipelines.used_resources() { if resource.is_unique() { - temp_suspected - .compute_pipelines - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.render_pipelines.used_resources() { if resource.is_unique() { - temp_suspected - .render_pipelines - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } for resource in trackers.query_sets.used_resources() { if resource.is_unique() { - temp_suspected - .query_sets - .insert(resource.as_info().id(), resource.clone()); + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } } self.lock_life().suspected_resources.extend(temp_suspected); - self.temp_suspected.lock().replace(ResourceMaps::new()); + self.temp_suspected.lock().replace(ResourceMaps::new::()); } pub(crate) fn create_buffer( @@ -3169,7 +3155,7 @@ impl Device { } impl Resource for Device { - const TYPE: &'static str = "Device"; + const TYPE: ResourceType = "Device"; fn as_info(&self) -> &ResourceInfo { 
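// A small illustrative sketch (assumed names) of the take-and-replace dance the patch
// performs around the device's `temp_suspected` map: take the map out of the lock, merge
// its contents elsewhere without holding the lock, then put a fresh empty map back.
use std::collections::HashMap;
use std::sync::Mutex;

struct Scratch {
    slot: Mutex<Option<HashMap<u32, String>>>,
}

impl Scratch {
    fn new() -> Self {
        Self { slot: Mutex::new(Some(HashMap::new())) }
    }

    fn drain_into(&self, sink: &mut HashMap<u32, String>) {
        // Take the map out so the lock is not held while the contents are merged.
        let taken = self.slot.lock().unwrap().take().unwrap();
        sink.extend(taken);
        // Leave an empty map behind for the next caller.
        let _ = self.slot.lock().unwrap().replace(HashMap::new());
    }
}

fn main() {
    let scratch = Scratch::new();
    scratch.slot.lock().unwrap().as_mut().unwrap().insert(1, "buffer".to_string());
    let mut sink = HashMap::new();
    scratch.drain_into(&mut sink);
    assert_eq!(sink.len(), 1);
}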
&self.info diff --git a/wgpu-core/src/hal_api.rs b/wgpu-core/src/hal_api.rs index 870557b442..b56ff9a458 100644 --- a/wgpu-core/src/hal_api.rs +++ b/wgpu-core/src/hal_api.rs @@ -1,4 +1,4 @@ -use wgt::Backend; +use wgt::{Backend, WasmNotSend, WasmNotSync}; use crate::{ global::Global, @@ -7,7 +7,7 @@ use crate::{ instance::{HalSurface, Instance, Surface}, }; -pub trait HalApi: hal::Api + 'static { +pub trait HalApi: hal::Api + 'static + WasmNotSend + WasmNotSync { const VARIANT: Backend; fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance; fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>; diff --git a/wgpu-core/src/id.rs b/wgpu-core/src/id.rs index 116fb00fa2..b4642d8315 100644 --- a/wgpu-core/src/id.rs +++ b/wgpu-core/src/id.rs @@ -3,9 +3,10 @@ use std::{ any::Any, cmp::Ordering, fmt::{self, Debug}, + hash::Hash, marker::PhantomData, }; -use wgt::Backend; +use wgt::{Backend, WasmNotSend, WasmNotSync}; #[cfg(feature = "id32")] type IdType = u32; @@ -71,7 +72,7 @@ type Dummy = hal::api::Empty; all(feature = "serde", not(feature = "replay")), derive(serde::Deserialize) )] -pub struct Id(NonZeroId, PhantomData); +pub struct Id(NonZeroId, PhantomData); // This type represents Id in a more readable (and editable) way. #[allow(dead_code)] @@ -82,14 +83,20 @@ enum SerialId { Id(Index, Epoch, Backend), } #[cfg(feature = "trace")] -impl From> for SerialId { +impl From> for SerialId +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn from(id: Id) -> Self { let (index, epoch, backend) = id.unzip(); Self::Id(index, epoch, backend) } } #[cfg(feature = "replay")] -impl From for Id { +impl From for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn from(id: SerialId) -> Self { match id { SerialId::Id(index, epoch, backend) => TypedId::zip(index, epoch, backend), @@ -97,7 +104,10 @@ impl From for Id { } } -impl Id { +impl Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ /// # Safety /// /// The raw id must be valid for the type. @@ -128,41 +138,59 @@ impl Id { } } -impl Copy for Id {} +impl Copy for Id where T: 'static + WasmNotSend + WasmNotSync {} -impl Clone for Id { +impl Clone for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn clone(&self) -> Self { Self(self.0, PhantomData) } } -impl Debug for Id { +impl Debug for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { self.unzip().fmt(formatter) } } -impl std::hash::Hash for Id { +impl Hash for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn hash(&self, state: &mut H) { self.0.hash(state); } } -impl PartialEq for Id { +impl PartialEq for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } -impl Eq for Id {} +impl Eq for Id where T: 'static + WasmNotSend + WasmNotSync {} -impl PartialOrd for Id { +impl PartialOrd for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn partial_cmp(&self, other: &Self) -> Option { self.0.partial_cmp(&other.0) } } -impl Ord for Id { +impl Ord for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn cmp(&self, other: &Self) -> Ordering { self.0.cmp(&other.0) } @@ -173,14 +201,17 @@ impl Ord for Id { /// Most `wgpu-core` clients should not use this trait. Unusual clients that /// need to construct `Id` values directly, or access their components, like the /// WGPU recording player, may use this trait to do so. 
-pub trait TypedId: Copy + Debug + Any { +pub trait TypedId: Copy + Debug + Any + 'static + WasmNotSend + WasmNotSync + Eq + Hash { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self; fn unzip(self) -> (Index, Epoch, Backend); fn into_raw(self) -> NonZeroId; } #[allow(trivial_numeric_casts)] -impl TypedId for Id { +impl TypedId for Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self { assert_eq!(0, epoch >> EPOCH_BITS); assert_eq!(0, (index as IdType) >> INDEX_BITS); diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index 7846e7031d..48890528df 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -8,7 +8,7 @@ use crate::{ id::{AdapterId, DeviceId, QueueId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, - resource::{Resource, ResourceInfo}, + resource::{Resource, ResourceInfo, ResourceType}, LabelHelpers, DOWNLEVEL_WARNING_MESSAGE, }; @@ -162,7 +162,7 @@ pub struct Surface { } impl Resource for Surface { - const TYPE: &'static str = "Surface"; + const TYPE: ResourceType = "Surface"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -386,7 +386,7 @@ impl Adapter { } impl Resource for Adapter { - const TYPE: &'static str = "Adapter"; + const TYPE: ResourceType = "Adapter"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/pipeline.rs b/wgpu-core/src/pipeline.rs index d62e32f8b7..c09a2265bf 100644 --- a/wgpu-core/src/pipeline.rs +++ b/wgpu-core/src/pipeline.rs @@ -6,7 +6,7 @@ use crate::{ device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, hal_api::HalApi, id::{ComputePipelineId, PipelineLayoutId, RenderPipelineId, ShaderModuleId}, - resource::{Resource, ResourceInfo}, + resource::{Resource, ResourceInfo, ResourceType}, validation, Label, }; use arrayvec::ArrayVec; @@ -69,7 +69,7 @@ impl Drop for ShaderModule { } impl Resource for ShaderModule { - const TYPE: &'static str = "ShaderModule"; + const TYPE: ResourceType = "ShaderModule"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -264,7 +264,7 @@ impl Drop for ComputePipeline { } impl Resource for ComputePipeline { - const TYPE: &'static str = "ComputePipeline"; + const TYPE: ResourceType = "ComputePipeline"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -496,7 +496,7 @@ impl Drop for RenderPipeline { } impl Resource for RenderPipeline { - const TYPE: &'static str = "RenderPipeline"; + const TYPE: ResourceType = "RenderPipeline"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index e005bf7da3..1da553165a 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -23,6 +23,7 @@ use hal::CommandEncoder; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use smallvec::SmallVec; use thiserror::Error; +use wgt::{WasmNotSend, WasmNotSync}; use std::{ borrow::Borrow, @@ -132,8 +133,10 @@ impl ResourceInfo { } } -pub trait Resource { - const TYPE: &'static str; +pub(crate) type ResourceType = &'static str; + +pub trait Resource: 'static + WasmNotSend + WasmNotSync { + const TYPE: ResourceType; fn as_info(&self) -> &ResourceInfo; fn as_info_mut(&mut self) -> &mut ResourceInfo; fn label(&self) -> String { @@ -604,7 +607,7 @@ pub enum CreateBufferError { } impl Resource for Buffer { - const TYPE: &'static str = "Buffer"; + const TYPE: ResourceType = "Buffer"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -656,7 +659,7 @@ impl Drop 
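// A hedged sketch of the `WasmNotSend` / `WasmNotSync` idea behind the new bounds on
// `Id<T>`, `TypedId` and `Resource`. The real marker traits live in wgpu-types and are
// gated on the actual wasm configuration; the cfg below is illustrative only. On native
// targets the alias demands `Send`, on wasm it is satisfied by every type, so a single
// bound can be written everywhere.
#[cfg(not(target_arch = "wasm32"))]
pub trait WasmNotSend: Send {}
#[cfg(not(target_arch = "wasm32"))]
impl<T: Send> WasmNotSend for T {}

#[cfg(target_arch = "wasm32")]
pub trait WasmNotSend {}
#[cfg(target_arch = "wasm32")]
impl<T> WasmNotSend for T {}

// The resource trait takes the same shape as in the patch: a `&'static str` type tag
// (`ResourceType`) plus the cross-platform marker bound.
pub type ResourceType = &'static str;

pub trait Resource: 'static + WasmNotSend {
    const TYPE: ResourceType;
}

struct Sampler;
impl Resource for Sampler {
    const TYPE: ResourceType = "Sampler";
}

fn main() {
    assert_eq!(Sampler::TYPE, "Sampler");
}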
for StagingBuffer { } impl Resource for StagingBuffer { - const TYPE: &'static str = "StagingBuffer"; + const TYPE: ResourceType = "StagingBuffer"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -956,7 +959,7 @@ pub enum CreateTextureError { } impl Resource for Texture { - const TYPE: &'static str = "Texture"; + const TYPE: ResourceType = "Texture"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -1112,7 +1115,7 @@ pub enum CreateTextureViewError { pub enum TextureViewDestroyError {} impl Resource for TextureView { - const TYPE: &'static str = "TextureView"; + const TYPE: ResourceType = "TextureView"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -1227,7 +1230,7 @@ pub enum CreateSamplerError { } impl Resource for Sampler { - const TYPE: &'static str = "Sampler"; + const TYPE: ResourceType = "Sampler"; fn as_info(&self) -> &ResourceInfo { &self.info @@ -1274,7 +1277,7 @@ impl Drop for QuerySet { } impl Resource for QuerySet { - const TYPE: &'static str = "QuerySet"; + const TYPE: ResourceType = "QuerySet"; fn as_info(&self) -> &ResourceInfo { &self.info diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs index 992b083ba1..932993681a 100644 --- a/wgpu-core/src/track/buffer.rs +++ b/wgpu-core/src/track/buffer.rs @@ -7,7 +7,7 @@ use std::{borrow::Cow, marker::PhantomData, sync::Arc}; -use super::PendingTransition; +use super::{PendingTransition, ResourceTracker}; use crate::{ hal_api::HalApi, id::{BufferId, TypedId}, @@ -291,6 +291,62 @@ pub(crate) struct BufferTracker { temp: Vec>, } + +impl ResourceTracker> for BufferTracker { + /// Removes the buffer `id` from this tracker if it is otherwise unused. + /// + /// A buffer is 'otherwise unused' when the only references to it are: + /// + /// 1) the `Arc` that our caller, `LifetimeTracker::triage_suspected`, has just + /// drained from `LifetimeTracker::suspected_resources`, + /// + /// 2) its `Arc` in [`self.metadata`] (owned by [`Device::trackers`]), and + /// + /// 3) its `Arc` in the [`Hub::buffers`] registry. + /// + /// If the buffer is indeed unused, this function removes 2), and + /// `triage_suspected` will remove 3), leaving 1) as the sole + /// remaining reference. + /// + /// Return `true` if this tracker contained the buffer `id`. This + /// implies that we removed it. + /// + /// [`Device::trackers`]: crate::device::Device + /// [`self.metadata`]: BufferTracker::metadata + /// [`Hub::buffers`]: crate::hub::Hub::buffers + fn remove_abandoned(&mut self, id: BufferId, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.metadata.remove(index); + log::info!("Buffer {:?} is not tracked anymore", id,); + return true; + } else { + log::info!( + "Buffer {:?} is still referenced from {}", + id, + existing_ref_count + ); + } + } + } + + false + } +} + impl BufferTracker { pub fn new() -> Self { Self { @@ -555,59 +611,6 @@ impl BufferTracker { } None } - - /// Removes the buffer `id` from this tracker if it is otherwise unused. 
- /// - /// A buffer is 'otherwise unused' when the only references to it are: - /// - /// 1) the `Arc` that our caller, `LifetimeTracker::triage_suspected`, has just - /// drained from `LifetimeTracker::suspected_resources`, - /// - /// 2) its `Arc` in [`self.metadata`] (owned by [`Device::trackers`]), and - /// - /// 3) its `Arc` in the [`Hub::buffers`] registry. - /// - /// If the buffer is indeed unused, this function removes 2), and - /// `triage_suspected` will remove 3), leaving 1) as the sole - /// remaining reference. - /// - /// Return `true` if this tracker contained the buffer `id`. This - /// implies that we removed it. - /// - /// [`Device::trackers`]: crate::device::Device - /// [`self.metadata`]: BufferTracker::metadata - /// [`Hub::buffers`]: crate::hub::Hub::buffers - pub fn remove_abandoned(&mut self, id: BufferId, is_in_registry: bool) -> bool { - let index = id.unzip().0 as usize; - - if index > self.metadata.size() { - return false; - } - - self.tracker_assert_in_bounds(index); - - unsafe { - if self.metadata.contains_unchecked(index) { - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //2 ref count if only in Device Tracker and suspected resource itself and already released from user - //so not appearing in Registry - let min_ref_count = if is_in_registry { 3 } else { 2 }; - if existing_ref_count <= min_ref_count { - self.metadata.remove(index); - log::info!("Buffer {:?} is not tracked anymore", id,); - return true; - } else { - log::info!( - "Buffer {:?} is still referenced from {}", - id, - existing_ref_count - ); - } - } - } - - false - } } /// Source of Buffer State. diff --git a/wgpu-core/src/track/mod.rs b/wgpu-core/src/track/mod.rs index b5c3861ebb..bd8d3a5580 100644 --- a/wgpu-core/src/track/mod.rs +++ b/wgpu-core/src/track/mod.rs @@ -484,6 +484,14 @@ impl UsageScope { } } +pub(crate) trait ResourceTracker +where + Id: TypedId, + R: resource::Resource, +{ + fn remove_abandoned(&mut self, id: Id, external_count: usize) -> bool; +} + /// A full double sided tracker used by CommandBuffers and the Device. pub(crate) struct Tracker { pub buffers: BufferTracker, diff --git a/wgpu-core/src/track/stateless.rs b/wgpu-core/src/track/stateless.rs index 96353767b1..e88c0c0c61 100644 --- a/wgpu-core/src/track/stateless.rs +++ b/wgpu-core/src/track/stateless.rs @@ -12,6 +12,8 @@ use crate::{ hal_api::HalApi, id::TypedId, resource::Resource, storage::Storage, track::ResourceMetadata, }; +use super::ResourceTracker; + /// Stores all the resources that a bind group stores. #[derive(Debug)] pub(crate) struct StatelessBindGroupSate> { @@ -72,6 +74,50 @@ pub(crate) struct StatelessTracker> { _phantom: PhantomData, } +impl> ResourceTracker + for StatelessTracker +{ + /// Removes the given resource from the tracker iff we have the last reference to the + /// resource and the epoch matches. + /// + /// Returns true if the resource was removed. + /// + /// If the ID is higher than the length of internal vectors, + /// false will be returned. 
+ fn remove_abandoned(&mut self, id: Id, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.metadata.remove(index); + log::info!("{} {:?} is not tracked anymore", T::TYPE, id,); + return true; + } else { + log::info!( + "{} {:?} is still referenced from {}", + T::TYPE, + id, + existing_ref_count + ); + } + } + } + + false + } +} + impl> StatelessTracker { pub fn new() -> Self { Self { @@ -187,44 +233,4 @@ impl> StatelessTracker { } None } - - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. - /// - /// Returns true if the resource was removed. - /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. - pub fn remove_abandoned(&mut self, id: Id, is_in_registry: bool) -> bool { - let index = id.unzip().0 as usize; - - if index > self.metadata.size() { - return false; - } - - self.tracker_assert_in_bounds(index); - - unsafe { - if self.metadata.contains_unchecked(index) { - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //2 ref count if only in Device Tracker and suspected resource itself and already released from user - //so not appearing in Registry - let min_ref_count = if is_in_registry { 3 } else { 2 }; - if existing_ref_count <= min_ref_count { - self.metadata.remove(index); - log::info!("{} {:?} is not tracked anymore", T::TYPE, id,); - return true; - } else { - log::info!( - "{} {:?} is still referenced from {}", - T::TYPE, - id, - existing_ref_count - ); - } - } - } - - false - } } diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs index fe8598feee..bda2b9f850 100644 --- a/wgpu-core/src/track/texture.rs +++ b/wgpu-core/src/track/texture.rs @@ -19,7 +19,7 @@ * will treat the contents as junk. !*/ -use super::{range::RangedStates, PendingTransition, PendingTransitionList}; +use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker}; use crate::{ hal_api::HalApi, id::{TextureId, TypedId}, @@ -392,6 +392,50 @@ pub(crate) struct TextureTracker { _phantom: PhantomData, } + +impl ResourceTracker> for TextureTracker { + /// Removes the given resource from the tracker iff we have the last reference to the + /// resource and the epoch matches. + /// + /// Returns true if the resource was removed. + /// + /// If the ID is higher than the length of internal vectors, + /// false will be returned. 
+ fn remove_abandoned(&mut self, id: TextureId, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.start_set.complex.remove(&index); + self.end_set.complex.remove(&index); + self.metadata.remove(index); + log::info!("Texture {:?} is not tracked anymore", id,); + return true; + } else { + log::info!( + "Texture {:?} is still referenced from {}", + id, + existing_ref_count + ); + } + } + } + + false + } +} + impl TextureTracker { pub fn new() -> Self { Self { @@ -706,47 +750,6 @@ impl TextureTracker { false } - - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. - /// - /// Returns true if the resource was removed. - /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. - pub fn remove_abandoned(&mut self, id: TextureId, is_in_registry: bool) -> bool { - let index = id.unzip().0 as usize; - - if index > self.metadata.size() { - return false; - } - - self.tracker_assert_in_bounds(index); - - unsafe { - if self.metadata.contains_unchecked(index) { - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - //2 ref count if only in Device Tracker and suspected resource itself and already released from user - //so not appearing in Registry - let min_ref_count = if is_in_registry { 3 } else { 2 }; - if existing_ref_count <= min_ref_count { - self.start_set.complex.remove(&index); - self.end_set.complex.remove(&index); - self.metadata.remove(index); - log::info!("Texture {:?} is not tracked anymore", id,); - return true; - } else { - log::info!( - "Texture {:?} is still referenced from {}", - id, - existing_ref_count - ); - } - } - } - - false - } } /// An iterator adapter that can store two different iterator types. diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 3367654d94..7e0d95d6b4 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -3016,7 +3016,10 @@ impl crate::Context for Context { } } -impl From for wgc::id::Id { +impl From for wgc::id::Id +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn from(id: ObjectId) -> Self { // If the id32 feature is enabled in wgpu-core, this will make sure that the id fits in a NonZeroU32. 
#[allow(clippy::useless_conversion)] @@ -3026,7 +3029,10 @@ impl From for wgc::id::Id { } } -impl From> for ObjectId { +impl From> for ObjectId +where + T: 'static + WasmNotSend + WasmNotSync, +{ fn from(id: wgc::id::Id) -> Self { // If the id32 feature is enabled in wgpu-core, the conversion is not useless #[allow(clippy::useless_conversion)] From 3530dcbaf425cd62e321e9368399dba60d0a3e50 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 23 Sep 2023 03:03:04 +0200 Subject: [PATCH 110/132] Releasing bind group resources fix missing leaks --- wgpu-core/src/binding_model.rs | 12 +++++--- wgpu-core/src/command/bundle.rs | 10 +++--- wgpu-core/src/command/compute.rs | 20 +++++++----- wgpu-core/src/command/render.rs | 20 +++++++----- wgpu-core/src/device/life.rs | 53 ++++++++++++++++++++++++-------- wgpu-core/src/device/resource.rs | 6 ++-- 6 files changed, 80 insertions(+), 41 deletions(-) diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 2a4e3084a7..29c8ccf131 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -15,6 +15,7 @@ use crate::{ use arrayvec::ArrayVec; +use parking_lot::RwLock; #[cfg(feature = "replay")] use serde::Deserialize; #[cfg(feature = "trace")] @@ -847,9 +848,9 @@ pub struct BindGroup { pub(crate) layout: Arc>, pub(crate) info: ResourceInfo, pub(crate) used: BindGroupStates, - pub(crate) used_buffer_ranges: Vec>, - pub(crate) used_texture_ranges: Vec>, - pub(crate) dynamic_binding_info: Vec, + pub(crate) used_buffer_ranges: RwLock>>, + pub(crate) used_texture_ranges: RwLock>>, + pub(crate) dynamic_binding_info: RwLock>, /// Actual binding sizes for buffers that don't have `min_binding_size` /// specified in BGL. Listed in the order of iteration of `BGL.entries`. pub(crate) late_buffer_binding_sizes: Vec, @@ -877,16 +878,17 @@ impl BindGroup { offsets: &[wgt::DynamicOffset], limits: &wgt::Limits, ) -> Result<(), BindError> { - if self.dynamic_binding_info.len() != offsets.len() { + if self.dynamic_binding_info.read().len() != offsets.len() { return Err(BindError::MismatchedDynamicOffsetCount { group: bind_group_index, - expected: self.dynamic_binding_info.len(), + expected: self.dynamic_binding_info.read().len(), actual: offsets.len(), }); } for (idx, (info, &offset)) in self .dynamic_binding_info + .read() .iter() .zip(offsets.iter()) .enumerate() diff --git a/wgpu-core/src/command/bundle.rs b/wgpu-core/src/command/bundle.rs index e476a5ecf8..8f453605d4 100644 --- a/wgpu-core/src/command/bundle.rs +++ b/wgpu-core/src/command/bundle.rs @@ -318,10 +318,10 @@ impl RenderBundleEncoder { next_dynamic_offset = offsets_range.end; let offsets = &base.dynamic_offsets[offsets_range.clone()]; - if bind_group.dynamic_binding_info.len() != offsets.len() { + if bind_group.dynamic_binding_info.read().len() != offsets.len() { return Err(RenderCommandError::InvalidDynamicOffsetCount { actual: offsets.len(), - expected: bind_group.dynamic_binding_info.len(), + expected: bind_group.dynamic_binding_info.read().len(), }) .map_pass_err(scope); } @@ -330,7 +330,7 @@ impl RenderBundleEncoder { for (offset, info) in offsets .iter() .map(|offset| *offset as wgt::BufferAddress) - .zip(bind_group.dynamic_binding_info.iter()) + .zip(bind_group.dynamic_binding_info.read().iter()) { let (alignment, limit_name) = buffer_binding_type_alignment(&device.limits, info.binding_type); @@ -342,8 +342,8 @@ impl RenderBundleEncoder { } } - buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges); - 
texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges); + buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges.read()); + texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges.read()); state.set_bind_group(index, bind_group_guard.get(bind_group_id).as_ref().unwrap(), &bind_group.layout, offsets_range); unsafe { diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 2c8d8a5146..341743985f 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -505,16 +505,20 @@ impl Global { .map_pass_err(scope)?; buffer_memory_init_actions.extend( - bind_group.used_buffer_ranges.iter().filter_map(|action| { - action - .buffer - .initialization_status - .read() - .check_action(action) - }), + bind_group + .used_buffer_ranges + .read() + .iter() + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) + }), ); - for action in bind_group.used_texture_ranges.iter() { + for action in bind_group.used_texture_ranges.read().iter() { pending_discard_init_fixups .extend(texture_memory_actions.register_init_action(action)); } diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index cbb657f548..346ef24363 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1424,15 +1424,19 @@ impl Global { // is held to the bind group itself. buffer_memory_init_actions.extend( - bind_group.used_buffer_ranges.iter().filter_map(|action| { - action - .buffer - .initialization_status - .read() - .check_action(action) - }), + bind_group + .used_buffer_ranges + .read() + .iter() + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) + }), ); - for action in bind_group.used_texture_ranges.iter() { + for action in bind_group.used_texture_ranges.read().iter() { info.pending_discard_init_fixups .extend(texture_memory_actions.register_init_action(action)); } diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index bbcb0eb643..e8bf3be8ed 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -418,33 +418,37 @@ impl LifetimeTracker { } impl LifetimeTracker { - fn triage_resources( + fn triage_resources( resources_map: &mut HashMap>, active: &mut [ActiveSubmission], free_resources: &mut ResourceMaps, trackers: &mut impl ResourceTracker, registry: &Registry, - mut f: F, + count_fn: F, + mut on_remove: T, ) -> Vec> where Id: id::TypedId, R: Resource, - F: FnMut(&Id, &Arc), + F: Fn(u64, &[ActiveSubmission], &Id) -> usize, + T: FnMut(&Id, &Arc), { let mut removed_resources = Vec::new(); resources_map.retain(|&id, resource| { let submit_index = resource.as_info().submission_index(); + let mut count = 1; + count += count_fn(submit_index, active, &id); + count += registry.contains(id) as usize; + let non_referenced_resources = active .iter_mut() .find(|a| a.index == submit_index) .map_or(&mut *free_resources, |a| &mut a.last_resources); - - let mut count = 1; - count += registry.contains(id) as usize; count += non_referenced_resources.contains::(&id) as usize; + let is_removed = trackers.remove_abandoned(id, count); if is_removed { - f(&id, resource); + on_remove(&id, resource); removed_resources.push(resource.clone()); non_referenced_resources.insert(id, resource.clone()); } @@ -467,6 +471,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.bundles, &hub.render_bundles, + |_submit_index, _active, _id| 0, 
|_bundle_id, _bundle| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -476,19 +481,19 @@ impl LifetimeTracker { ); removed_resources.drain(..).for_each(|bundle| { for v in bundle.used.buffers.write().drain_resources() { - self.suspected_resources.insert(v.as_info().id(), v.clone()); + self.suspected_resources.insert(v.as_info().id(), v); } for v in bundle.used.textures.write().drain_resources() { - self.suspected_resources.insert(v.as_info().id(), v.clone()); + self.suspected_resources.insert(v.as_info().id(), v); } for v in bundle.used.bind_groups.write().drain_resources() { - self.suspected_resources.insert(v.as_info().id(), v.clone()); + self.suspected_resources.insert(v.as_info().id(), v); } for v in bundle.used.render_pipelines.write().drain_resources() { - self.suspected_resources.insert(v.as_info().id(), v.clone()); + self.suspected_resources.insert(v.as_info().id(), v); } for v in bundle.used.query_sets.write().drain_resources() { - self.suspected_resources.insert(v.as_info().id(), v.clone()); + self.suspected_resources.insert(v.as_info().id(), v); } }); self @@ -508,6 +513,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.bind_groups, &hub.bind_groups, + |_submit_index, _active, _id| 0, |_bind_group_id, _bind_group| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -528,6 +534,10 @@ impl LifetimeTracker { for v in bind_group.used.samplers.drain_resources() { self.suspected_resources.insert(v.as_info().id(), v); } + //Releasing safely unused resources to decrement refcount + bind_group.used_buffer_ranges.write().clear(); + bind_group.used_texture_ranges.write().clear(); + bind_group.dynamic_binding_info.write().clear(); self.suspected_resources .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); @@ -549,6 +559,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.views, &hub.texture_views, + |_submit_index, _active, _id| 0, |_texture_view_id, _texture_view| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -580,6 +591,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.textures, &hub.textures, + |_submit_index, _active, _id| 0, |_texture_id, _texture| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -604,6 +616,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.samplers, &hub.samplers, + |_submit_index, _active, _id| 0, |_sampler_id, _sampler| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -628,6 +641,19 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.buffers, &hub.buffers, + |submit_index, active, buffer_id| { + let mut count = 0; + let mapped = active + .iter() + .find(|a| a.index == submit_index) + .map_or(&self.mapped, |a| &a.mapped); + mapped.iter().for_each(|b| { + if b.as_info().id() == *buffer_id { + count += 1; + } + }); + count + }, |_buffer_id, _buffer| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -661,6 +687,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.compute_pipelines, &hub.compute_pipelines, + |_submit_index, _active, _id| 0, |_compute_pipeline_id, _compute_pipeline| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -691,6 +718,7 @@ impl LifetimeTracker { &mut self.free_resources, &mut trackers.render_pipelines, &hub.render_pipelines, + |_submit_index, _active, _id| 0, |_render_pipeline_id, _render_pipeline| { #[cfg(feature = "trace")] if let Some(ref mut t) = *trace { @@ -767,6 +795,7 @@ impl LifetimeTracker { &mut 
self.free_resources, &mut trackers.query_sets, &hub.query_sets, + |_submit_index, _active, _id| 0, |_query_set_id, _query_set| {}, ); self diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 50a25a203c..c6a3514703 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -2049,9 +2049,9 @@ impl Device { layout: layout.clone(), info: ResourceInfo::new(desc.label.borrow_or_default()), used, - used_buffer_ranges, - used_texture_ranges, - dynamic_binding_info, + used_buffer_ranges: RwLock::new(used_buffer_ranges), + used_texture_ranges: RwLock::new(used_texture_ranges), + dynamic_binding_info: RwLock::new(dynamic_binding_info), // collect in the order of BGL iteration late_buffer_binding_sizes: layout .entries From d4a86898507819dcecca2e5ef4548d95b13e5fb3 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 1 Oct 2023 11:36:03 +0200 Subject: [PATCH 111/132] Resolve merge conflicts --- wgpu-core/src/command/clear.rs | 2 +- wgpu-core/src/command/compute.rs | 4 +- wgpu-core/src/command/render.rs | 4 +- wgpu-core/src/command/transfer.rs | 20 ++-- wgpu-core/src/device/global.rs | 168 ++++++++++++------------------ wgpu-core/src/device/resource.rs | 14 +-- wgpu-core/src/instance.rs | 2 +- wgpu-core/src/present.rs | 10 +- wgpu/src/backend/web.rs | 2 +- wgpu/src/context.rs | 2 +- 10 files changed, 96 insertions(+), 132 deletions(-) diff --git a/wgpu-core/src/command/clear.rs b/wgpu-core/src/command/clear.rs index b99887e058..91e796f338 100644 --- a/wgpu-core/src/command/clear.rs +++ b/wgpu-core/src/command/clear.rs @@ -225,7 +225,7 @@ impl Global { let device = &cmd_buf.device; if !device.is_valid() { - return Err(ClearError::InvalidDevice(cmd_buf.device_id.value.0)); + return Err(ClearError::InvalidDevice(cmd_buf.device.as_info().id())); } let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(); diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 877ef19031..8e8d2bb455 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -371,11 +371,11 @@ impl Global { let device = &cmd_buf.device; if !device.is_valid() { return Err(ComputePassErrorInner::InvalidDevice( - cmd_buf.device_id.value.0, + cmd_buf.device.as_info().id(), )) .map_pass_err(init_scope); } - + let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 284d61e33f..02704b3e5e 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1307,11 +1307,11 @@ impl Global { occlusion_query_set_id, }); } - + let device = &cmd_buf.device; if !device.is_valid() { return Err(RenderPassErrorInner::InvalidDevice( - cmd_buf.device_id.value.0, + cmd_buf.device.as_info().id(), )) .map_pass_err(init_scope); } diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index 4afbc97ed8..9e24e16c4e 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -7,13 +7,13 @@ use crate::{ error::{ErrorFormatter, PrettyError}, global::Global, hal_api::HalApi, - id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, DeviceId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, TextureInitTrackerAction, }, - resource::{Texture, TextureErrorDimension}, + resource::{Resource, Texture, 
TextureErrorDimension}, storage::Storage, track::{TextureSelector, Tracker}, }; @@ -579,7 +579,7 @@ impl Global { let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } #[cfg(feature = "trace")] @@ -733,9 +733,9 @@ impl Global { let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } - + let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -891,9 +891,9 @@ impl Global { let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } - + let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -912,7 +912,6 @@ impl Global { let texture_guard = hub.textures.read(); - if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); return Ok(()); @@ -1062,9 +1061,9 @@ impl Global { let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } - + let mut cmd_buf_data = cmd_buf.data.lock(); let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); @@ -1082,7 +1081,6 @@ impl Global { let texture_guard = hub.textures.read(); - if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); return Ok(()); diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 4d4e5ac2f8..d7817aff88 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -95,9 +95,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + if !device.is_valid() { return Err(InvalidDevice); } @@ -109,9 +109,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + if !device.is_valid() { return Err(InvalidDevice); } @@ -123,9 +123,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + if !device.is_valid() { return Err(InvalidDevice); } @@ -144,15 +144,18 @@ impl Global { let fid = hub.buffers.prepare::(id_in); let device = match hub.devices.get(device_id) { - Ok(device) => device, + Ok(device) => { + if !device.is_valid() { + let id = fid.assign_error(desc.label.borrow_or_default()); + return (id, Some(DeviceError::Invalid.into())); + } + device + } Err(_) => { let id = fid.assign_error(desc.label.borrow_or_default()); return (id, Some(DeviceError::Invalid.into())); } }; - if !device.valid { - break DeviceError::Invalid.into(); - } if desc.usage.is_empty() { // Per spec, 
`usage` must not be zero. @@ -475,7 +478,7 @@ impl Global { pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { profiling::scope!("Buffer::drop"); - + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); let hub = A::hub(self); @@ -531,7 +534,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } #[cfg(feature = "trace")] @@ -543,7 +546,7 @@ impl Global { Ok(texture) => texture, Err(error) => break error, }; - + let (id, resource) = fid.assign(texture); log::info!("Created Texture {:?} with {:?}", id, desc); @@ -583,7 +586,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -656,7 +659,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -735,7 +738,7 @@ impl Global { pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { profiling::scope!("Texture::drop"); - + log::debug!("Texture {:?} is asked to be dropped", texture_id); let hub = A::hub(self); @@ -803,7 +806,7 @@ impl Global { Ok(view) => view, Err(e) => break e, }; - + let (id, resource) = fid.assign(view); log::info!("Created TextureView {:?}", id); device.trackers.lock().views.insert_single(id, resource); @@ -825,7 +828,7 @@ impl Global { wait: bool, ) -> Result<(), resource::TextureViewDestroyError> { profiling::scope!("TextureView::drop"); - + log::debug!("TextureView {:?} is asked to be dropped", texture_view_id); let hub = A::hub(self); @@ -868,7 +871,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -885,7 +888,7 @@ impl Global { let (id, resource) = fid.assign(sampler); log::info!("Created Sampler {:?}", id); device.trackers.lock().samplers.insert_single(id, resource); - + return (id, None); }; @@ -931,7 +934,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -972,12 +975,11 @@ impl Global { }; layout.compatible_layout = compatible_layout; - let (id, _) = fid.assign(layout); - if let Some(dupe) = compatible_layout { - log::info!("Created BindGroupLayout (duplicate of {dupe:?}) -> {:?}", id); - log::trace!( - "Device::create_bind_group_layout (duplicate of {dupe:?}) -> {:?}", - id.0 + let (id, layout) = fid.assign(layout); + if let Some(dupe) = layout.compatible_layout.as_ref() { + log::info!( + "Created BindGroupLayout (duplicate of {dupe:?}) -> {:?}", + id ); } else { log::info!("Created BindGroupLayout {:?}", id); @@ -996,7 +998,7 @@ impl Global { pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { profiling::scope!("BindGroupLayout::drop"); - + log::debug!( "BindGroupLayout {:?} is asked to be dropped", bind_group_layout_id @@ -1032,7 +1034,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1064,7 +1066,7 @@ impl Global { pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { profiling::scope!("PipelineLayout::drop"); - + log::debug!( "PipelineLayout {:?} is asked to be dropped", pipeline_layout_id @@ 
-1096,7 +1098,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1141,7 +1143,7 @@ impl Global { pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { profiling::scope!("BindGroup::drop"); - + log::debug!("BindGroup {:?} is asked to be dropped", bind_group_id); let hub = A::hub(self); @@ -1175,7 +1177,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1207,7 +1209,7 @@ impl Global { Ok(shader) => shader, Err(e) => break e, }; - + let (id, _) = fid.assign(shader); log::info!("Created ShaderModule {:?} with {:?}", id, desc); return (id, None); @@ -1243,7 +1245,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1278,9 +1280,9 @@ impl Global { pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { profiling::scope!("ShaderModule::drop"); - + log::debug!("ShaderModule {:?} is asked to be dropped", shader_module_id); - + let hub = A::hub(self); hub.shader_modules.unregister(shader_module_id); } @@ -1301,7 +1303,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid, }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid; } let queue = match hub.queues.get(device.queue_id.read().unwrap()) { @@ -1341,7 +1343,7 @@ impl Global { pub fn command_encoder_drop(&self, command_encoder_id: id::CommandEncoderId) { profiling::scope!("CommandEncoder::drop"); - + log::debug!( "CommandEncoder {:?} is asked to be dropped", command_encoder_id @@ -1358,7 +1360,7 @@ impl Global { pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { profiling::scope!("CommandBuffer::drop"); - + log::debug!( "CommandBuffer {:?} is asked to be dropped", command_buffer_id @@ -1400,7 +1402,7 @@ impl Global { Ok(device) => device, Err(_) => break command::RenderBundleError::INVALID_DEVICE, }; - if !device.valid { + if !device.is_valid() { break command::RenderBundleError::INVALID_DEVICE; } @@ -1439,9 +1441,9 @@ impl Global { pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { profiling::scope!("RenderBundle::drop"); - + log::debug!("RenderBundle {:?} is asked to be dropped", render_bundle_id); - + let hub = A::hub(self); if let Some(bundle) = hub.render_bundles.unregister(render_bundle_id) { @@ -1469,7 +1471,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1497,13 +1499,13 @@ impl Global { return (id, None); }; - let id = fid.assign_error(""); + let id = fid.assign_error(""); (id, Some(error)) } pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { profiling::scope!("QuerySet::drop"); - + log::debug!("QuerySet {:?} is asked to be dropped", query_set_id); let hub = A::hub(self); @@ -1549,7 +1551,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } #[cfg(feature = "trace")] @@ -1566,7 +1568,7 @@ impl Global { Ok(pair) => pair, Err(e) => break e, }; - + let (id, resource) = fid.assign(pipeline); log::info!("Created RenderPipeline {:?} with {:?}", id, desc); @@ -1625,12 +1627,12 @@ impl Global { pub fn 
render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { profiling::scope!("RenderPipeline::drop"); - + log::debug!( "RenderPipeline {:?} is asked to be dropped", render_pipeline_id ); - + let hub = A::hub(self); if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) { @@ -1669,7 +1671,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -1747,12 +1749,12 @@ impl Global { pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { profiling::scope!("ComputePipeline::drop"); - + log::debug!( "ComputePipeline {:?} is asked to be dropped", compute_pipeline_id ); - + let hub = A::hub(self); if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { @@ -1898,7 +1900,7 @@ impl Global { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Invalid.into(); } @@ -2159,9 +2161,9 @@ impl Global { log::trace!("Device::start_capture"); let hub = A::hub(self); - + if let Ok(device) = hub.devices.get(id) { - if !device.valid { + if !device.is_valid() { return; } unsafe { device.raw().start_capture() }; @@ -2172,9 +2174,9 @@ impl Global { log::trace!("Device::stop_capture"); let hub = A::hub(self); - + if let Ok(device) = hub.devices.get(id) { - if !device.valid { + if !device.is_valid() { return; } unsafe { device.raw().stop_capture() }; @@ -2183,7 +2185,7 @@ impl Global { pub fn device_drop(&self, device_id: DeviceId) { profiling::scope!("Device::drop"); - + log::debug!("Device {:?} is asked to be dropped", device_id); let hub = A::hub(self); @@ -2202,22 +2204,20 @@ impl Global { drop(device); } } - + pub fn device_destroy(&self, device_id: DeviceId) { log::trace!("Device::destroy {device_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, _) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { + if let Ok(device) = hub.devices.get(device_id) { // Follow the steps at // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. // It's legal to call destroy multiple times, but if the device // is already invalid, there's nothing more to do. There's also // no need to return an error. - if !device.valid { + if !device.is_valid() { return; } @@ -2239,45 +2239,11 @@ impl Global { log::trace!("Device::lose {device_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - - let (mut device_guard, _) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { + if let Ok(device) = hub.devices.get(device_id) { device.lose(reason); } } - /// Exit the unreferenced, inactive device `device_id`. - fn exit_device(&self, device_id: DeviceId) { - let hub = A::hub(self); - let mut token = Token::root(); - let mut free_adapter_id = None; - { - let (device, mut _token) = hub.devices.unregister(device_id, &mut token); - if let Some(mut device) = device { - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. - debug_assert!(device.lock_life(&mut _token).queue_empty()); - device.pending_writes.deactivate(); - - // Adapter is only referenced by the device and itself. - // This isn't a robust way to destroy them, we should find a better one. 
- if device.adapter_id.ref_count.load() == 1 { - free_adapter_id = Some(device.adapter_id.value.0); - } - - device.dispose(); - } - } - - let hub = A::hub(self); - if let Some(queue) = hub.queues.unregister(queue_id) { - drop(queue); - } - } - pub fn queue_drop(&self, queue_id: QueueId) { profiling::scope!("Queue::drop"); log::debug!("Queue {:?} is asked to be dropped", queue_id); @@ -2343,8 +2309,8 @@ impl Global { } }; - let device = &device_guard[buffer.device_id.value]; - if !device.valid { + let device = &buffer.device; + if !device.is_valid() { return Err((op, BufferAccessError::Invalid)); } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 70e13d441f..20c4cc67df 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -46,7 +46,7 @@ use std::{ iter, num::NonZeroU32, sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }, }; @@ -107,7 +107,7 @@ pub struct Device { /// Error enums, we wouldn't need this. For now, we need it. All the call /// sites where we check it are areas that should be revisited if we start /// using ref-counted references for internal access. - pub(crate) valid: bool, + pub(crate) valid: AtomicBool, /// All live resources allocated with this [`Device`]. /// @@ -252,7 +252,7 @@ impl Device { command_allocator: Mutex::new(Some(com_alloc)), active_submission_index: AtomicU64::new(0), fence: RwLock::new(Some(fence)), - valid: true, + valid: AtomicBool::new(true), trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), temp_suspected: Mutex::new(Some(life::ResourceMaps::new::())), @@ -279,9 +279,9 @@ impl Device { } pub fn is_valid(&self) -> bool { - self.valid + self.valid.load(Ordering::Acquire) } - + pub(crate) fn release_queue(&self, queue: A::Queue) { self.queue_to_drop.write().replace(queue); } @@ -3175,12 +3175,12 @@ impl Device { }) } - pub(crate) fn lose(&mut self, _reason: Option<&str>) { + pub(crate) fn lose(&self, _reason: Option<&str>) { // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device. // Mark the device explicitly as invalid. This is checked in various // places to prevent new work from being submitted. - self.valid = false; + self.valid.store(false, Ordering::Release); // The following steps remain in "lose the device": // 1) Resolve the GPUDevice device.lost promise. 
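The resource.rs hunk above turns `Device::valid` into an `AtomicBool` so that `lose` and `is_valid` can work through a shared `&self` reference. A minimal standalone sketch of that interior-mutability pattern follows; the `FakeDevice` name and methods are illustrative only, not the actual wgpu-core types:

use std::sync::atomic::{AtomicBool, Ordering};

struct FakeDevice {
    valid: AtomicBool,
}

impl FakeDevice {
    fn new() -> Self {
        Self { valid: AtomicBool::new(true) }
    }

    // Load with Acquire so callers observe a prior `lose`.
    fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    // Takes `&self`, not `&mut self`: the atomic provides interior mutability.
    fn lose(&self) {
        self.valid.store(false, Ordering::Release);
    }
}

fn main() {
    let device = FakeDevice::new();
    assert!(device.is_valid());
    device.lose(); // a shared reference is enough; no exclusive borrow needed
    assert!(!device.is_valid());
}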
diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index c19dfd1eb7..12573c1ef2 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -723,7 +723,7 @@ impl Global { pub fn surface_drop(&self, id: SurfaceId) { profiling::scope!("Surface::drop"); - + log::info!("Surface::drop {id:?}"); fn unconfigure( diff --git a/wgpu-core/src/present.rs b/wgpu-core/src/present.rs index d84a5a2099..3b4df90b53 100644 --- a/wgpu-core/src/present.rs +++ b/wgpu-core/src/present.rs @@ -136,11 +136,11 @@ impl Global { let (device, config) = if let Some(ref present) = *surface.presentation.lock() { match present.device.downcast_clone::() { Some(device) => { - if !device.is_valid() { - return Err(DeviceError::Invalid.into()); - } - (device, present.config.clone()) - }, + if !device.is_valid() { + return Err(DeviceError::Invalid.into()); + } + (device, present.config.clone()) + } None => return Err(SurfaceError::NotConfigured), } } else { diff --git a/wgpu/src/backend/web.rs b/wgpu/src/backend/web.rs index e627cd6747..7f0a6db6aa 100644 --- a/wgpu/src/backend/web.rs +++ b/wgpu/src/backend/web.rs @@ -1923,7 +1923,7 @@ impl crate::context::Context for Context { // the device.lost promise, which will require a different invocation pattern // with a callback. } - + fn queue_drop(&self, _queue: &Self::QueueId, _queue_data: &Self::QueueData) { // Queue is dropped automatically } diff --git a/wgpu/src/context.rs b/wgpu/src/context.rs index 5da6ee5463..04b42fe2fc 100644 --- a/wgpu/src/context.rs +++ b/wgpu/src/context.rs @@ -2441,7 +2441,7 @@ where let device_data = downcast_ref(device_data); Context::device_lose(self, &device, device_data) } - + fn queue_drop(&self, queue: &ObjectId, queue_data: &crate::Data) { let queue = ::from(*queue); let queue_data = downcast_ref(queue_data); From af519873c4bd6f86caca60f273fdead0bd7dce4d Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 7 Oct 2023 09:31:01 +0200 Subject: [PATCH 112/132] Fixing command order issue --- wgpu-core/src/command/compute.rs | 10 ++++------ wgpu-core/src/command/render.rs | 5 ++--- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 8e8d2bb455..1045e4c362 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -533,16 +533,15 @@ impl Global { .extend(texture_memory_actions.register_init_action(action)); } - let pipeline_layout = &state.binder.pipeline_layout; - let pipeline_layout = pipeline_layout.as_ref().unwrap().clone(); + let pipeline_layout = state.binder.pipeline_layout.clone(); let entries = state.binder.assign_group( index as usize, bind_group_id, bind_group, &temp_offsets, ); - if !entries.is_empty() { - let pipeline_layout = pipeline_layout.raw(); + if !entries.is_empty() && pipeline_layout.is_some() { + let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); unsafe { @@ -906,8 +905,7 @@ pub mod compute_ffi { } pass.base - .commands - .push(ComputeCommand::SetPipeline(pipeline_id)); + .commands.push(ComputeCommand::SetPipeline(pipeline_id)); } /// # Safety diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 02704b3e5e..c55515e5cf 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1458,7 +1458,7 @@ impl Global { bind_group, &temp_offsets, ); - if !entries.is_empty() { + if !entries.is_empty() && 
pipeline_layout.is_some() { let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); @@ -2355,8 +2355,7 @@ pub mod render_ffi { } pass.base - .commands - .push(RenderCommand::SetPipeline(pipeline_id)); + .commands.push(RenderCommand::SetPipeline(pipeline_id)); } #[no_mangle] From 877ef948b95247a750228e90202b8853a3c99d09 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 7 Oct 2023 13:27:22 +0200 Subject: [PATCH 113/132] Fix integration and simply layout groups dedup --- deno_webgpu/error.rs | 4 +- player/tests/data/bind-group.ron | 52 +++++++------ wgpu-core/src/binding_model.rs | 64 +--------------- wgpu-core/src/command/bind.rs | 46 ++++++----- wgpu-core/src/command/compute.rs | 51 +++++++------ wgpu-core/src/command/render.rs | 62 +++++++-------- wgpu-core/src/device/global.rs | 95 ++++++----------------- wgpu-core/src/device/life.rs | 22 +++--- wgpu-core/src/device/queue.rs | 12 ++- wgpu-core/src/device/resource.rs | 127 ++++++++++--------------------- wgpu-core/src/identity.rs | 6 +- wgpu-core/src/registry.rs | 5 +- 12 files changed, 196 insertions(+), 350 deletions(-) diff --git a/deno_webgpu/error.rs b/deno_webgpu/error.rs index b966e5d7cf..6c509a80d3 100644 --- a/deno_webgpu/error.rs +++ b/deno_webgpu/error.rs @@ -104,9 +104,7 @@ impl From for WebGpuError { match err { DeviceError::Lost => WebGpuError::Lost, DeviceError::OutOfMemory => WebGpuError::OutOfMemory, - DeviceError::ResourceCreationFailed - | DeviceError::Invalid - | DeviceError::WrongDevice => WebGpuError::Validation(fmt_err(&err)), + _ => WebGpuError::Validation(fmt_err(&err)), } } } diff --git a/player/tests/data/bind-group.ron b/player/tests/data/bind-group.ron index 00ecf0b20c..7b89d456bc 100644 --- a/player/tests/data/bind-group.ron +++ b/player/tests/data/bind-group.ron @@ -2,30 +2,6 @@ features: 0x0, expectations: [], //not crash! 
actions: [ - CreatePipelineLayout(Id(0, 1, Empty), ( - label: Some("empty"), - bind_group_layouts: [], - push_constant_ranges: [], - )), - CreateShaderModule( - id: Id(0, 1, Empty), - desc: ( - label: None, - flags: (bits: 3), - ), - data: "empty.wgsl", - ), - CreateComputePipeline( - id: Id(0, 1, Empty), - desc: ( - label: None, - layout: Some(Id(0, 1, Empty)), - stage: ( - module: Id(0, 1, Empty), - entry_point: "main", - ), - ), - ), CreateBuffer(Id(0, 1, Empty), ( label: None, size: 16, @@ -58,16 +34,42 @@ ) ], )), + CreatePipelineLayout(Id(0, 1, Empty), ( + label: Some("empty"), + bind_group_layouts: [ + Id(0, 1, Empty), + ], + push_constant_ranges: [], + )), + CreateShaderModule( + id: Id(0, 1, Empty), + desc: ( + label: None, + flags: (bits: 3), + ), + data: "empty.wgsl", + ), + CreateComputePipeline( + id: Id(0, 1, Empty), + desc: ( + label: None, + layout: Some(Id(0, 1, Empty)), + stage: ( + module: Id(0, 1, Empty), + entry_point: "main", + ), + ), + ), Submit(1, [ RunComputePass( base: ( commands: [ - SetPipeline(Id(0, 1, Empty)), SetBindGroup( index: 0, num_dynamic_offsets: 0, bind_group_id: Id(0, 1, Empty), ), + SetPipeline(Id(0, 1, Empty)), ], dynamic_offsets: [], string_data: [], diff --git a/wgpu-core/src/binding_model.rs b/wgpu-core/src/binding_model.rs index 9c9ada039b..5f973da662 100644 --- a/wgpu-core/src/binding_model.rs +++ b/wgpu-core/src/binding_model.rs @@ -454,21 +454,6 @@ pub type BindGroupLayouts = crate::storage::Storage, BindG pub struct BindGroupLayout { pub(crate) raw: Option, pub(crate) device: Arc>, - // When a layout created and there already exists a compatible layout the new layout - // keeps a reference to the older compatible one. In some places we substitute the - // bind group layout id with its compatible sibling. - // Since this substitution can come at a cost, it is skipped when wgpu-core generates - // its own resource IDs. - pub(crate) inner: BglOrDuplicate, -} - -pub(crate) enum BglOrDuplicate { - Inner(BindGroupLayoutInner), - Duplicate(Arc>), -} - -pub struct BindGroupLayoutInner { - pub(crate) raw: A::BindGroupLayout, pub(crate) entries: BindEntryMap, #[allow(unused)] pub(crate) dynamic_count: usize, @@ -503,7 +488,7 @@ impl Resource for BindGroupLayout { fn label(&self) -> String { #[cfg(debug_assertions)] - return self.as_inner().map_or("", |inner| &inner.label); + return self.label.clone(); #[cfg(not(debug_assertions))] return String::new(); } @@ -512,53 +497,6 @@ impl BindGroupLayout { pub(crate) fn raw(&self) -> &A::BindGroupLayout { self.raw.as_ref().unwrap() } - #[track_caller] - pub(crate) fn assume_deduplicated(&self) -> &BindGroupLayoutInner { - self.as_inner().unwrap() - } - - pub(crate) fn as_inner(&self) -> Option<&BindGroupLayoutInner> { - match self.inner { - BglOrDuplicate::Inner(ref inner) => Some(inner), - BglOrDuplicate::Duplicate(_) => None, - } - } - - pub(crate) fn into_inner(self) -> Option> { - match self.inner { - BglOrDuplicate::Inner(inner) => Some(inner), - BglOrDuplicate::Duplicate(_) => None, - } - } - - pub(crate) fn as_duplicate(&self) -> Option>> { - match self.inner { - BglOrDuplicate::Duplicate(layout) => Some(layout.clone()), - BglOrDuplicate::Inner(_) => None, - } - } -} - -// If a bindgroup needs to be substitued with its compatible equivalent, return the latter. 
-pub(crate) fn try_get_bind_group_layout( - layouts: &BindGroupLayouts, - id: BindGroupLayoutId, -) -> Option<&Arc>> { - let layout = layouts.get(id).ok()?; - if let BglOrDuplicate::Duplicate(original_layout) = layout.inner { - return Some(&original_layout); - } - Some(layout) -} - -pub(crate) fn get_bind_group_layout( - layouts: &BindGroupLayouts, - id: BindGroupLayoutId, -) -> (BindGroupLayoutId, &Arc>) { - let layout = &layouts[id]; - layout - .as_duplicate() - .map_or((id, layout), |deduped| (deduped, &layouts[deduped])) } #[derive(Clone, Debug, Error)] diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index 331f2a56e9..33f1be0a38 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -6,6 +6,7 @@ use crate::{ hal_api::HalApi, id::BindGroupId, pipeline::LateSizedBufferGroup, + resource::Resource, }; use arrayvec::ArrayVec; @@ -41,9 +42,6 @@ mod compat { if expected_bgl.is_equal(assigned_bgl) { return true; } - if let Some(compatible_bgl) = assigned_bgl.compatible_layout.as_ref() { - return compatible_bgl.is_equal(expected_bgl); - } } return false; } @@ -51,12 +49,7 @@ mod compat { } fn is_incompatible(&self) -> bool { - if let Some(expected_bgl) = self.expected.as_ref() { - if let Some(assigned_bgl) = self.assigned.as_ref() { - return !assigned_bgl.is_equal(expected_bgl); - } - } - true + !self.is_valid() } } @@ -135,9 +128,9 @@ struct LateBufferBinding { bound_size: wgt::BufferAddress, } -#[derive(Debug, Default)] -pub(super) struct EntryPayload { - pub(super) group_id: Option, +#[derive(Debug)] +pub(super) struct EntryPayload { + pub(super) group: Option>>, pub(super) dynamic_offsets: Vec, late_buffer_bindings: Vec, /// Since `LateBufferBinding` may contain information about the bindings @@ -145,9 +138,20 @@ pub(super) struct EntryPayload { pub(super) late_bindings_effective_count: usize, } -impl EntryPayload { +impl Default for EntryPayload { + fn default() -> Self { + Self { + group: None, + dynamic_offsets: Default::default(), + late_buffer_bindings: Default::default(), + late_bindings_effective_count: Default::default(), + } + } +} + +impl EntryPayload { fn reset(&mut self) { - self.group_id = None; + self.group = None; self.dynamic_offsets.clear(); self.late_buffer_bindings.clear(); self.late_bindings_effective_count = 0; @@ -158,7 +162,7 @@ impl EntryPayload { pub(super) struct Binder { pub(super) pipeline_layout: Option>>, manager: compat::BoundBindGroupLayouts, - payloads: [EntryPayload; hal::MAX_BIND_GROUPS], + payloads: [EntryPayload; hal::MAX_BIND_GROUPS], } impl Binder { @@ -181,7 +185,7 @@ impl Binder { &'a mut self, new: &Arc>, late_sized_buffer_groups: &[LateSizedBufferGroup], - ) -> (usize, &'a [EntryPayload]) { + ) -> (usize, &'a [EntryPayload]) { let old_id_opt = self.pipeline_layout.replace(new.clone()); let mut bind_range = self.manager.update_expectations(&new.bind_group_layouts); @@ -221,15 +225,15 @@ impl Binder { pub(super) fn assign_group<'a>( &'a mut self, index: usize, - bind_group_id: BindGroupId, - bind_group: &BindGroup, + bind_group: &Arc>, offsets: &[wgt::DynamicOffset], - ) -> &'a [EntryPayload] { + ) -> &'a [EntryPayload] { + let bind_group_id = bind_group.as_info().id(); log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id); debug_assert_eq!(A::VARIANT, bind_group_id.backend()); let payload = &mut self.payloads[index]; - payload.group_id = Some(bind_group_id); + payload.group = Some(bind_group.clone()); payload.dynamic_offsets.clear(); payload.dynamic_offsets.extend_from_slice(offsets); 
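The bind.rs change above makes each binder slot own an `Arc` to its bound group instead of storing a bare id. A rough, self-contained sketch of that ownership pattern; the `Group` and `Slot` names below are hypothetical, not wgpu-core API:

use std::sync::Arc;

struct Group {
    label: String,
}

#[derive(Default)]
struct Slot {
    group: Option<Arc<Group>>,
    dynamic_offsets: Vec<u32>,
}

impl Slot {
    // Cloning the Arc bumps the refcount; the slot keeps the group alive
    // until it is reset or overwritten, without a registry lookup later.
    fn assign(&mut self, group: &Arc<Group>, offsets: &[u32]) {
        self.group = Some(Arc::clone(group));
        self.dynamic_offsets.clear();
        self.dynamic_offsets.extend_from_slice(offsets);
    }

    fn reset(&mut self) {
        self.group = None;
        self.dynamic_offsets.clear();
    }
}

fn main() {
    let g = Arc::new(Group { label: "example bind group".into() });
    let mut slot = Slot::default();

    slot.assign(&g, &[0, 256]);
    // One strong reference held by `g`, one by the slot.
    assert_eq!(Arc::strong_count(&g), 2);
    println!("bound: {}", slot.group.as_ref().unwrap().label);

    slot.reset();
    assert_eq!(Arc::strong_count(&g), 1);
}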
@@ -261,7 +265,7 @@ impl Binder { let payloads = &self.payloads; self.manager .list_active() - .map(move |index| *payloads[index].group_id.as_ref().unwrap()) + .map(move |index| payloads[index].group.as_ref().unwrap().as_info().id()) } pub(super) fn invalid_mask(&self) -> BindGroupMask { diff --git a/wgpu-core/src/command/compute.rs b/wgpu-core/src/command/compute.rs index 1045e4c362..f3de81adf1 100644 --- a/wgpu-core/src/command/compute.rs +++ b/wgpu-core/src/command/compute.rs @@ -505,7 +505,7 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &BindGroup = tracker + let bind_group = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(ComputePassErrorInner::InvalidBindGroup(bind_group_id)) @@ -534,23 +534,23 @@ impl Global { } let pipeline_layout = state.binder.pipeline_layout.clone(); - let entries = state.binder.assign_group( - index as usize, - bind_group_id, - bind_group, - &temp_offsets, - ); + let entries = + state + .binder + .assign_group(index as usize, bind_group, &temp_offsets); if !entries.is_empty() && pipeline_layout.is_some() { let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); - unsafe { - raw.set_bind_group( - pipeline_layout, - index + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline_layout, + index + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } @@ -585,14 +585,16 @@ impl Global { ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); - unsafe { - raw.set_bind_group( - pipeline.layout.raw(), - start_index as u32 + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline.layout.raw(), + start_index as u32 + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } @@ -905,7 +907,8 @@ pub mod compute_ffi { } pass.base - .commands.push(ComputeCommand::SetPipeline(pipeline_id)); + .commands + .push(ComputeCommand::SetPipeline(pipeline_id)); } /// # Safety diff --git a/wgpu-core/src/command/render.rs b/wgpu-core/src/command/render.rs index 09f2da70da..d4a8e3fe81 100644 --- a/wgpu-core/src/command/render.rs +++ b/wgpu-core/src/command/render.rs @@ -1422,13 +1422,13 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &crate::binding_model::BindGroup = tracker + let bind_group = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) .map_pass_err(scope)?; - if bind_group.device_id.value != device_id { + if bind_group.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1464,23 +1464,23 @@ impl Global { } let pipeline_layout = state.binder.pipeline_layout.clone(); - let entries = state.binder.assign_group( - index as usize, - bind_group_id, - bind_group, - &temp_offsets, - ); + let entries = + state + .binder + .assign_group(index as usize, bind_group, &temp_offsets); if !entries.is_empty() && pipeline_layout.is_some() { let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); - unsafe { - 
raw.set_bind_group( - pipeline_layout, - index + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline_layout, + index + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } @@ -1497,7 +1497,7 @@ impl Global { .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; - if pipeline.device_id.value != device_id { + if pipeline.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1549,15 +1549,16 @@ impl Global { ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = - bind_group_guard[*e.group_id.as_ref().unwrap()].raw(); - unsafe { - raw.set_bind_group( - pipeline.layout.raw(), - start_index as u32 + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline.layout.raw(), + start_index as u32 + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } @@ -1622,7 +1623,7 @@ impl Global { .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) .map_pass_err(scope)?; - if buffer.device_id.value != device_id { + if buffer.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1675,7 +1676,7 @@ impl Global { .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) .map_pass_err(scope)?; - if buffer.device_id.value != device_id { + if buffer.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -2254,7 +2255,7 @@ impl Global { .ok_or(RenderCommandError::InvalidRenderBundle(bundle_id)) .map_pass_err(scope)?; - if bundle.device_id.value != device_id { + if bundle.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -2421,7 +2422,8 @@ pub mod render_ffi { } pass.base - .commands.push(RenderCommand::SetPipeline(pipeline_id)); + .commands + .push(RenderCommand::SetPipeline(pipeline_id)); } #[no_mangle] diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index a1422c7a8f..33d5735bdf 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1,9 +1,8 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{self, BindGroupLayout}, - command, conv, - device::{life::WaitIdleError, map_buffer, queue, Device, DeviceError, HostMap}, + binding_model, command, conv, + device::{life::WaitIdleError, map_buffer, queue, DeviceError, HostMap}, global::Global, hal_api::HalApi, id::{self, AdapterId, DeviceId, QueueId, SurfaceId}, @@ -177,7 +176,7 @@ impl Global { trace.add(trace::Action::CreateBuffer(fid.id(), desc)); } - let buffer = match device.create_buffer(device_id, desc, false) { + let buffer = match device.create_buffer(desc, false) { Ok(buffer) => buffer, Err(e) => { let id = fid.assign_error(desc.label.borrow_or_default()); @@ -225,7 +224,7 @@ impl Global { usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, mapped_at_creation: false, }; - let stage = match device.create_buffer(device_id, &stage_desc, true) { + let stage = match device.create_buffer(&stage_desc, true) { Ok(stage) => stage, Err(e) => { device @@ -236,7 +235,7 @@ impl Global { return (id, Some(e)); } }; - let stage_fid = hub.buffers.request::(); + let stage_fid = hub.buffers.request(); let stage = stage_fid.init(stage); let mapping = match 
unsafe { device.raw().map_buffer(stage.raw(), 0..stage.size) } { @@ -543,14 +542,14 @@ impl Global { trace.add(trace::Action::CreateTexture(fid.id(), desc.clone())); } - let texture = match device.create_texture(device_id, &device.adapter, desc) { + let texture = match device.create_texture(&device.adapter, desc) { Ok(texture) => texture, Err(error) => break error, }; let (id, resource) = fid.assign(texture); log::info!("Created Texture {:?} with {:?}", id, desc); - + device.trackers.lock().textures.insert_single( id, resource, @@ -609,7 +608,6 @@ impl Global { let mut texture = device.create_texture_from_hal( hal_texture, conv::map_texture_usage(desc.usage, desc.format.into()), - device_id, desc, format_features, resource::TextureClearMode::None, @@ -671,7 +669,7 @@ impl Global { trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); } - let buffer = device.create_buffer_from_hal(hal_buffer, device_id, desc); + let buffer = device.create_buffer_from_hal(hal_buffer, desc); let (id, buffer) = fid.assign(buffer); log::info!("Created buffer {:?} with {:?}", id, desc); @@ -959,45 +957,27 @@ impl Global { } } - let mut compatible_layout = None; - let layout = { + if let Some((id, layout)) = { let bgl_guard = hub.bind_group_layouts.read(); - if let Some((_id, layout)) = - Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) - { - compatible_layout = Some(layout.clone()); - } - - if let Some(original_layout) = compatible_layout { - BindGroupLayout { - device: original_layout.device.clone(), - inner: crate::binding_model::BglOrDuplicate::Duplicate(original_layout.clone()), - } - } else { - match device.create_bind_group_layout( - desc.label.borrow_option(), - entry_map, - ) { - Ok(layout) => layout, - Err(e) => break e, - } - } - }; - - let (id, layout) = fid.assign(layout); - - if let Some(dupe) = compatible_layout { - log::info!( - "Created BindGroupLayout (duplicate of {dupe:?}) -> {:?}", - id - ); - } else { - log::info!("Created BindGroupLayout {:?}", id); + device.deduplicate_bind_group_layout(&entry_map, &*bgl_guard) + } { + log::info!("Reusing BindGroupLayout {layout:?} -> {:?}", id); + let id = fid.assign_existing(&layout); + return (id, None); } + let layout = + match device.create_bind_group_layout(desc.label.borrow_option(), entry_map) { + Ok(layout) => layout, + Err(e) => break e, + }; + + let (id, _layout) = fid.assign(layout); + log::info!("Created BindGroupLayout {:?}", id); return (id, None); }; + let fid = hub.bind_group_layouts.prepare::(id_in); let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1118,7 +1098,7 @@ impl Global { } let bind_group_layout_guard = hub.bind_group_layouts.read(); - let mut bind_group_layout = match bind_group_layout_guard.get(desc.layout) { + let bind_group_layout = match bind_group_layout_guard.get(desc.layout) { Ok(layout) => layout, Err(..) 
=> break binding_model::CreateBindGroupError::InvalidLayout, }; @@ -1127,12 +1107,6 @@ impl Global { break DeviceError::WrongDevice.into(); } - let mut layout_id = id::Valid(desc.layout); - if let Some(id) = bind_group_layout.as_duplicate() { - layout_id = id; - bind_group_layout = &bind_group_layout_guard[id]; - } - let bind_group = match device.create_bind_group(bind_group_layout, desc, hub) { Ok(bind_group) => bind_group, Err(e) => break e, @@ -1626,25 +1600,6 @@ impl Global { .assign_existing(bg), None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), }; - - let layout = &bgl_guard[*id]; - layout.multi_ref_count.inc(); - - if G::ids_are_generated_in_wgpu() { - return (id.0, None); - } - - // The ID is provided externally, so we must create a new bind group layout - // with the given ID as a duplicate of the existing one. - let new_layout = BindGroupLayout { - device_id: layout.device_id.clone(), - inner: crate::binding_model::BglOrDuplicate::::Duplicate(*id), - multi_ref_count: crate::MultiRefCount::new(), - }; - - let fid = hub.bind_group_layouts.prepare(id_in); - let id = fid.assign(new_layout, &mut Token::root()); - return (id, None); }; @@ -1969,7 +1924,7 @@ impl Global { if !caps.formats.contains(&config.format) { break 'outer E::UnsupportedFormat { requested: config.format, - available: caps.formats.clone(), + available: caps.formats, }; } if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() { diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs index 0f0ea9e3a3..5e4ab5fe7b 100644 --- a/wgpu-core/src/device/life.rs +++ b/wgpu-core/src/device/life.rs @@ -29,7 +29,7 @@ use parking_lot::Mutex; use thiserror::Error; use wgt::{WasmNotSend, WasmNotSync}; -use std::{any::Any, collections::HashMap, sync::Arc}; +use std::{any::Any, sync::Arc}; pub(crate) trait ResourceMap: Any + WasmNotSend + WasmNotSync { fn as_any(&self) -> &dyn Any; @@ -38,7 +38,7 @@ pub(crate) trait ResourceMap: Any + WasmNotSend + WasmNotSync { fn extend_map(&mut self, maps: &mut ResourceMaps); } -impl ResourceMap for HashMap> +impl ResourceMap for FastHashMap> where Id: id::TypedId, R: Resource, @@ -73,21 +73,21 @@ impl ResourceMaps { Id: id::TypedId, R: Resource, { - let map = HashMap::>::default(); + let map = FastHashMap::>::default(); self.maps.insert(R::TYPE, Box::new(map)); self } - fn map(&self) -> &HashMap> + fn map(&self) -> &FastHashMap> where Id: id::TypedId, R: Resource, { let map = self.maps.get(R::TYPE).unwrap(); let any_map = map.as_ref().as_any(); - let map = any_map.downcast_ref::>>().unwrap(); + let map = any_map.downcast_ref::>>().unwrap(); map } - fn map_mut(&mut self) -> &mut HashMap> + fn map_mut(&mut self) -> &mut FastHashMap> where Id: id::TypedId, R: Resource, @@ -95,9 +95,9 @@ impl ResourceMaps { let map = self .maps .entry(R::TYPE) - .or_insert_with(|| Box::>>::new(HashMap::default())); + .or_insert_with(|| Box::>>::default()); let any_map = map.as_mut().as_any_mut(); - let map = any_map.downcast_mut::>>().unwrap(); + let map = any_map.downcast_mut::>>().unwrap(); map } pub(crate) fn new() -> Self { @@ -419,7 +419,7 @@ impl LifetimeTracker { impl LifetimeTracker { fn triage_resources( - resources_map: &mut HashMap>, + resources_map: &mut FastHashMap>, active: &mut [ActiveSubmission], free_resources: &mut ResourceMaps, trackers: &mut impl ResourceTracker, @@ -774,10 +774,8 @@ impl LifetimeTracker { if let Some(ref mut t) = *trace { t.add(trace::Action::DestroyBindGroupLayout(*bind_group_layout_id)); } - if let Some(inner) = 
lay.into_inner() { self.free_resources - .insert(*bind_group_layout_id, inner.raw.clone()); - } + .insert(*bind_group_layout_id, bind_group_layout.clone()); false }); self diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 15b7fe0052..f765f4b653 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -418,7 +418,7 @@ impl Global { let mut pending_writes = device.pending_writes.lock(); let pending_writes = pending_writes.as_mut().unwrap(); - let stage_fid = hub.staging_buffers.request::(); + let stage_fid = hub.staging_buffers.request(); let staging_buffer = stage_fid.init(staging_buffer); if let Err(flush_error) = unsafe { @@ -431,7 +431,6 @@ impl Global { } let result = self.queue_write_staging_buffer_impl( - queue_id, device, pending_writes, &staging_buffer, @@ -506,7 +505,6 @@ impl Global { } let result = self.queue_write_staging_buffer_impl( - queue_id, device, pending_writes, &staging_buffer, @@ -595,7 +593,7 @@ impl Global { .as_ref() .ok_or(TransferError::InvalidBuffer(buffer_id))?; - if dst.device_id.value.0 != device_id { + if dst.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -679,7 +677,7 @@ impl Global { .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(destination.texture))?; - if dst.device_id.value.0 != queue_id { + if dst.device.as_info().id() != queue_id { return Err(DeviceError::WrongDevice.into()); } @@ -820,7 +818,7 @@ impl Global { // `device.pending_writes.consume`. let (staging_buffer, staging_buffer_ptr) = prepare_staging_buffer(device, stage_size)?; - let stage_fid = hub.staging_buffers.request::(); + let stage_fid = hub.staging_buffers.request(); let staging_buffer = stage_fid.init(staging_buffer); if stage_bytes_per_row == bytes_per_row { @@ -1165,7 +1163,7 @@ impl Global { None => continue, }; - if cmdbuf.device_id.value.0 != queue_id { + if cmdbuf.device.as_info().id() != queue_id { return Err(DeviceError::WrongDevice.into()); } diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index ac47bb7353..c288fff0cd 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -1,11 +1,7 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{ - self, get_bind_group_layout, try_get_bind_group_layout, BindGroupLayout, - BindGroupLayoutEntryError,BglOrDuplicate, - BindGroupLayoutInner, - }, + binding_model::{self, BindGroupLayout, BindGroupLayoutEntryError}, command, conv, device::life::{LifetimeTracker, WaitIdleError}, device::queue::PendingWrites, @@ -431,11 +427,10 @@ impl Device { pub(crate) fn create_buffer( self: &Arc, - self_id: DeviceId, desc: &resource::BufferDescriptor, transient: bool, ) -> Result, resource::CreateBufferError> { - debug_assert_eq!(self_id.backend(), A::VARIANT); + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); if desc.size > self.limits.max_buffer_size { return Err(resource::CreateBufferError::MaxBufferSize { @@ -532,12 +527,11 @@ impl Device { self: &Arc, hal_texture: A::Texture, hal_usage: hal::TextureUses, - self_id: DeviceId, desc: &resource::TextureDescriptor, format_features: wgt::TextureFormatFeatures, clear_mode: resource::TextureClearMode, ) -> Texture { - debug_assert_eq!(self_id.backend(), A::VARIANT); + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); Texture { inner: RwLock::new(Some(resource::TextureInner::Native { @@ -563,10 +557,9 @@ impl Device { pub fn create_buffer_from_hal( self: &Arc, 
hal_buffer: A::Buffer, - self_id: DeviceId, desc: &resource::BufferDescriptor, ) -> Buffer { - debug_assert_eq!(self_id.backend(), A::VARIANT); + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); Buffer { raw: Some(hal_buffer), @@ -582,7 +575,6 @@ impl Device { pub(crate) fn create_texture( self: &Arc, - self_id: DeviceId, adapter: &Adapter, desc: &resource::TextureDescriptor, ) -> Result, resource::CreateTextureError> { @@ -800,14 +792,8 @@ impl Device { resource::TextureClearMode::BufferCopy }; - let mut texture = self.create_texture_from_hal( - raw_texture, - hal_usage, - self_id, - desc, - format_features, - clear_mode, - ); + let mut texture = + self.create_texture_from_hal(raw_texture, hal_usage, desc, format_features, clear_mode); texture.hal_usage = hal_usage; Ok(texture) } @@ -1405,19 +1391,16 @@ impl Device { } pub(crate) fn deduplicate_bind_group_layout<'a>( - self_id: DeviceId, + self: &Arc, entry_map: &'a binding_model::BindEntryMap, guard: &'a Storage, id::BindGroupLayoutId>, - ) -> Option<(id::BindGroupLayoutId, &'a Arc>)> { + ) -> Option<(id::BindGroupLayoutId, Arc>)> { guard - .iter(self_id.backend()) + .iter(self.as_info().id().backend()) .find(|&(_, bgl)| { - bgl.device.info.id() == self_id - && bgl - .as_inner() - .map_or(false, |inner| inner.entries == *entry_map) + bgl.device.info.id() == self.as_info().id() && bgl.entries == *entry_map }) - .map(|(id, resource)| (id, resource)) + .map(|(id, resource)| (id, resource.clone())) } pub(crate) fn get_introspection_bind_group_layouts<'a>( @@ -1426,12 +1409,7 @@ impl Device { pipeline_layout .bind_group_layouts .iter() - .map(|&id| { - &get_bind_group_layout(bgl_guard, id) - .1 - .assume_deduplicated() - .entries - }) + .map(|layout| &layout.entries) .collect() } @@ -1448,10 +1426,8 @@ impl Device { .bind_group_layouts .iter() .enumerate() - .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { - shader_sizes: get_bind_group_layout(bgl_guard, bgl_id) - .1 - .assume_deduplicated() + .map(|(group_index, bgl)| pipeline::LateSizedBufferGroup { + shader_sizes: bgl .entries .values() .filter_map(|entry| match entry.ty { @@ -1668,17 +1644,14 @@ impl Device { raw: Some(raw), device: self.clone(), info: ResourceInfo::new(label.unwrap_or("")), - inner: BglOrDuplicate::Inner(BindGroupLayoutInner { - raw, - dynamic_count: entry_map - .values() - .filter(|b| b.ty.has_dynamic_offset()) - .count(), - count_validator, - entries: entry_map, - #[cfg(debug_assertions)] - label: label.unwrap_or("").to_string(), - }), + dynamic_count: entry_map + .values() + .filter(|b| b.ty.has_dynamic_offset()) + .count(), + count_validator, + entries: entry_map, + #[cfg(debug_assertions)] + label: label.unwrap_or("").to_string(), }) } @@ -1741,10 +1714,6 @@ impl Device { .add_single(storage, bb.buffer_id, internal_use) .ok_or(Error::InvalidBuffer(bb.buffer_id))?; - if buffer.device_id.value.0 != device_id { - return Err(DeviceError::WrongDevice.into()); - } - check_buffer_usage(buffer.usage, pub_usage)?; let raw_buffer = buffer .raw @@ -1836,7 +1805,7 @@ impl Device { texture_id, ))?; - if texture.device_id.value.0 != device_id { + if texture.device.as_info().id() != view.device.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -1870,7 +1839,7 @@ impl Device { // Check that the number of entries in the descriptor matches // the number of entries in the layout. 
let actual = desc.entries.len(); - let expected = layout.assume_deduplicated().entries.len(); + let expected = layout.entries.len(); if actual != expected { return Err(Error::BindingsNumMismatch { expected, actual }); } @@ -1900,14 +1869,12 @@ impl Device { let binding = entry.binding; // Find the corresponding declaration in the layout let decl = layout - .assume_deduplicated() .entries .get(&binding) .ok_or(Error::MissingBindingDeclaration(binding))?; let (res_index, count) = match entry.resource { Br::Buffer(ref bb) => { let bb = Self::create_buffer_binding( - self_id, bb, binding, decl, @@ -1930,7 +1897,6 @@ impl Device { let res_index = hal_buffers.len(); for bb in bindings_array.iter() { let bb = Self::create_buffer_binding( - self_id, bb, binding, decl, @@ -1953,7 +1919,7 @@ impl Device { .add_single(&*sampler_guard, id) .ok_or(Error::InvalidSampler(id))?; - if sampler.device_id.value.0 != self_id { + if sampler.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -2005,7 +1971,7 @@ impl Device { .samplers .add_single(&*sampler_guard, id) .ok_or(Error::InvalidSampler(id))?; - if sampler.device.as_info().id() != self_id { + if sampler.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } hal_samplers.push(sampler.raw()); @@ -2025,7 +1991,6 @@ impl Device { "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture", )?; Self::create_texture_binding( - self_id, view, internal_use, pub_usage, @@ -2053,7 +2018,6 @@ impl Device { Self::texture_use_parameters(binding, decl, view, "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; Self::create_texture_binding( - self_id, view, internal_use, pub_usage, @@ -2085,10 +2049,9 @@ impl Device { return Err(Error::DuplicateBinding(a.binding)); } } - let layout_inner = layout.assume_deduplicated(); let hal_desc = hal::BindGroupDescriptor { label: desc.label.borrow_option(), - layout: layout_inner.raw(), + layout: layout.raw(), entries: &hal_entries, buffers: &hal_buffers, samplers: &hal_samplers, @@ -2112,7 +2075,7 @@ impl Device { used_texture_ranges: RwLock::new(used_texture_ranges), dynamic_binding_info: RwLock::new(dynamic_binding_info), // collect in the order of BGL iteration - late_buffer_binding_sizes: layout_inner + late_buffer_binding_sizes: layout .entries .keys() .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned()) @@ -2341,15 +2304,15 @@ impl Device { // validate total resource counts for &id in desc.bind_group_layouts.iter() { - let Some(bind_group_layout) = try_get_bind_group_layout(bgl_guard, id) else { + let Ok(bind_group_layout) = bgl_guard.get(id) else { return Err(Error::InvalidBindGroupLayout(id)); }; - if bind_group_layout.device_id.value.0 != self_id { + if bind_group_layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } - count_validator.merge(&bind_group_layout.assume_deduplicated().count_validator); + count_validator.merge(&bind_group_layout.count_validator); } count_validator .validate(&self.limits) @@ -2358,12 +2321,7 @@ impl Device { let bgl_vec = desc .bind_group_layouts .iter() - .map(|&id| { - try_get_bind_group_layout(bgl_guard, id) - .unwrap() - .assume_deduplicated() - .raw() - }) + .map(|&id| bgl_guard.get(id).unwrap().raw()) .collect::>(); let hal_desc = hal::PipelineLayoutDescriptor { label: desc.label.borrow_option(), @@ -2387,10 +2345,7 @@ impl Device { bind_group_layouts: desc .bind_group_layouts .iter() - .map(|&id| { - 
let (_, layout) = get_bind_group_layout(bgl_guard, id); - layout.clone() - }) + .map(|&id| bgl_guard.get(id).unwrap().clone()) .collect(), push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(), }) @@ -2423,11 +2378,7 @@ impl Device { } for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { - let bgl = match Device::deduplicate_bind_group_layout( - self.info.id(), - &map, - &bgl_registry.read(), - ) { + let bgl = match self.deduplicate_bind_group_layout(&map, &bgl_registry.read()) { Some((dedup_id, _)) => { *bgl_id = dedup_id; None @@ -2479,7 +2430,7 @@ impl Device { .get(desc.stage.module) .map_err(|_| validation::StageError::InvalidModule)?; - if shader_module.device_id.value.0 != self_id { + if shader_module.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -2526,7 +2477,7 @@ impl Device { .get(pipeline_layout_id) .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; - if layout.device_id.value.0 != self_id { + if layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -2869,20 +2820,20 @@ impl Device { error: validation::StageError::InvalidModule, } })?; - if shader_module.device.as_info().id() != self_id { + if shader_module.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } shader_modules.push(shader_module.clone()); let pipeline_layout_guard = hub.pipeline_layouts.read(); - + let provided_layouts = match desc.layout { Some(pipeline_layout_id) => { let pipeline_layout = pipeline_layout_guard .get(pipeline_layout_id) .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; - if pipeline_layout.device_id.value.0 != self_id { + if pipeline_layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index fa7d057dd5..ae83a905c3 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -3,9 +3,9 @@ use wgt::Backend; use crate::{ id::{self}, - Epoch, Index, + Epoch, FastHashMap, Index, }; -use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; /// A simple structure to allocate [`Id`] identifiers. /// @@ -38,7 +38,7 @@ use std::{collections::HashMap, fmt::Debug, marker::PhantomData, sync::Arc}; pub(super) struct IdentityValues { free: Vec<(Index, Epoch)>, //sorted by Index - used: HashMap>, + used: FastHashMap>, count: usize, } diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 310c6814a0..17d538c494 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -113,10 +113,7 @@ impl> Registry { data: &self.storage, } } - pub(crate) fn request(&self) -> FutureId - where - F: IdentityHandlerFactory, - { + pub(crate) fn request(&self) -> FutureId { FutureId { id: self.identity.process(self.backend), identity: self.identity.clone(), From a7de4da0946376abb91eaec1fe9aed5577e14645 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 7 Oct 2023 13:33:56 +0200 Subject: [PATCH 114/132] Fixing clippy doc --- wgpu-core/src/resource.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 1da553165a..3f3579bdd8 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -44,7 +44,7 @@ use std::{ /// its latest submission index and label. 
/// /// A resource may need to be retained for any of several reasons: -/// and any lifetime logic will be handled by Arc refcount +/// and any lifetime logic will be handled by `Arc` refcount /// /// - The user may hold a reference to it (via a `wgpu::Buffer`, say). /// From 5d086723c0047037ea7c954382497eb7777da3ed Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 8 Oct 2023 10:35:44 +0200 Subject: [PATCH 115/132] Fix clippy fmt --- wgpu-core/src/device/global.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 59bdcc1efe..1f5420e480 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -372,11 +372,11 @@ impl Global { .devices .get(device_id) .map_err(|_| DeviceError::Invalid)?; - + if !device.is_valid() { return Err(DeviceError::Invalid.into()); } - + let buffer = hub .buffers .get(buffer_id) @@ -435,7 +435,7 @@ impl Global { if !device.is_valid() { return Err(DeviceError::Invalid.into()); } - + let buffer = hub .buffers .get(buffer_id) From b7e518a32927fa91d12ba3e3af52a2c8498314b6 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 8 Oct 2023 12:10:17 +0200 Subject: [PATCH 116/132] Fix wrong checks on is_valid and is_incompatible --- wgpu-core/src/command/bind.rs | 8 +++++--- wgpu-core/src/device/global.rs | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/wgpu-core/src/command/bind.rs b/wgpu-core/src/command/bind.rs index 33f1be0a38..66b95a6df9 100644 --- a/wgpu-core/src/command/bind.rs +++ b/wgpu-core/src/command/bind.rs @@ -37,19 +37,21 @@ mod compat { } fn is_valid(&self) -> bool { + if self.expected.is_none() { + return true; + } if let Some(expected_bgl) = self.expected.as_ref() { if let Some(assigned_bgl) = self.assigned.as_ref() { if expected_bgl.is_equal(assigned_bgl) { return true; } } - return false; } - true + false } fn is_incompatible(&self) -> bool { - !self.is_valid() + self.expected.is_none() || !self.is_valid() } } diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 1f5420e480..0edd5e2a7a 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1828,7 +1828,7 @@ impl Global { unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes); }; - log::warn!( + log::info!( "Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}", config.present_mode ); @@ -1872,7 +1872,7 @@ impl Global { ); }; - log::warn!( + log::info!( "Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}", config.composite_alpha_mode ); @@ -2453,7 +2453,9 @@ impl Global { .buffers .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; - + if !buffer.device.is_valid() { + return Err(DeviceError::Invalid.into()); + } closure = buffer.buffer_unmap_inner() } From cbf8860cd57225bee5583ccc142473a0be9d3d6c Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 9 Oct 2023 18:35:39 +0200 Subject: [PATCH 117/132] Applying cargo fmt --- wgpu-core/src/device/global.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index bb89563ed6..702c37972f 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1892,7 +1892,7 @@ impl Global { // User callbacks must not be called while we are holding locks. 
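The `is_valid` / `is_incompatible` fix above tightens how a binder slot decides whether the bind group assigned to it matches the layout the current pipeline expects: a slot with no expectation counts as valid, and otherwise the assigned layout must equal the expected one. A minimal standalone sketch of that expected/assigned pattern — hypothetical `Entry` and `Layout` types, not wgpu-core's real ones, which compare `Arc<BindGroupLayout>` via an `is_equal` helper — could look like this:

```rust
use std::sync::Arc;

struct Layout {
    entries: Vec<u32>, // simplified stand-in for a real bind group layout
}

#[derive(Default)]
struct Entry {
    expected: Option<Arc<Layout>>, // what the pipeline layout demands for this slot
    assigned: Option<Arc<Layout>>, // what set_bind_group actually bound
}

impl Entry {
    // A slot is valid when nothing is expected, or when the assigned layout
    // matches the expected one (same Arc, or equal contents).
    fn is_valid(&self) -> bool {
        match (&self.expected, &self.assigned) {
            (None, _) => true,
            (Some(expected), Some(assigned)) => {
                Arc::ptr_eq(expected, assigned) || expected.entries == assigned.entries
            }
            (Some(_), None) => false,
        }
    }
}

fn main() {
    let layout = Arc::new(Layout { entries: vec![0, 1] });
    let mut slot = Entry::default();
    assert!(slot.is_valid()); // nothing expected yet

    slot.expected = Some(layout.clone());
    assert!(!slot.is_valid()); // expected but nothing assigned

    slot.assigned = Some(layout);
    assert!(slot.is_valid()); // assigned matches expected
}
```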
let mut user_callbacks = None; - + let surface_guard = self.surfaces.read(); let device_guard = hub.devices.read(); From 804f1da8dcac4d5079e3b47ab05bae2014e84ac1 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 9 Oct 2023 18:40:58 +0200 Subject: [PATCH 118/132] Fix integration issues --- wgpu-core/src/device/global.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 702c37972f..af111a978b 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1888,21 +1888,14 @@ impl Global { } log::debug!("configuring surface with {:?}", config); - let hub = A::hub(self); // User callbacks must not be called while we are holding locks. let mut user_callbacks = None; - let surface_guard = self.surfaces.read(); - let device_guard = hub.devices.read(); - let error = 'outer: loop { let hub = A::hub(self); - let mut token = Token::root(); - - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); + let surface_guard = self.surfaces.read(); + let device_guard = hub.devices.read(); let device = match device_guard.get(device_id) { Ok(device) => device, From 41f81714efa2daf18db87c7456e7c8b2f0030298 Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 12 Oct 2023 19:03:14 +0200 Subject: [PATCH 119/132] Fix format --- wgpu-core/src/device/global.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index e22258aeeb..2035038dc6 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1889,7 +1889,7 @@ impl Global { log::debug!("configuring surface with {:?}", config); - let error = 'outer: loop { + let error = 'outer: loop { // User callbacks must not be called while we are holding locks. let user_callbacks; { @@ -1972,7 +1972,7 @@ impl Global { if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) { break error; } - + // Wait for all work to finish before configuring the surface. 
let fence = device.fence.read(); let fence = fence.as_ref().unwrap(); @@ -2007,7 +2007,9 @@ impl Global { Ok(()) => (), Err(error) => { break match error { - hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface, + hal::SurfaceError::Outdated | hal::SurfaceError::Lost => { + E::InvalidSurface + } hal::SurfaceError::Device(error) => E::Device(error.into()), hal::SurfaceError::Other(message) => { log::error!("surface configuration failed: {}", message); @@ -2024,10 +2026,10 @@ impl Global { num_frames, acquired_texture: None, }); - } - - user_callbacks.fire(); - return None; + } + + user_callbacks.fire(); + return None; }; Some(error) From ad0109a1e89f8177f75d2bae06f655e2ceda80dc Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 12 Oct 2023 19:04:01 +0200 Subject: [PATCH 120/132] Fix integration --- wgpu-hal/src/dx12/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgpu-hal/src/dx12/mod.rs b/wgpu-hal/src/dx12/mod.rs index 27e1791631..fa2dd1d3f4 100644 --- a/wgpu-hal/src/dx12/mod.rs +++ b/wgpu-hal/src/dx12/mod.rs @@ -136,7 +136,7 @@ impl Instance { d3d12::ComPtr::from_raw(swap_chain_panel) }), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } } From dbde6dd40b3300a7beb15e2260d7e2ac044f3efb Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 12 Oct 2023 19:09:07 +0200 Subject: [PATCH 121/132] Fix merge --- wgpu-core/src/device/global.rs | 2 +- wgpu-core/src/instance.rs | 29 +++++++++++++++++------------ 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 2035038dc6..5e1bdc9879 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -1978,7 +1978,7 @@ impl Global { let fence = fence.as_ref().unwrap(); match device.maintain(hub, fence, wgt::Maintain::Wait) { Ok((closures, _)) => { - user_callbacks = Some(closures); + user_callbacks = closures; } Err(e) => { break e.into(); diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index b5d5a899be..c20ce97f43 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -728,20 +728,25 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_swap_chain_panel"); let surface = Surface { - presentation: None, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe { inst.create_surface_from_swap_chain_panel(swap_chain_panel as _) }, - }), - dx11: None, - #[cfg(feature = "gles")] - gl: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: unsafe { + inst.create_surface_from_swap_chain_panel(swap_chain_panel as _) + }, + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } pub fn surface_drop(&self, id: SurfaceId) { From 2af22cdac4c84ed64b1e0a0b65ac4188aafc152f Mon Sep 17 00:00:00 2001 From: gents83 Date: Thu, 12 Oct 2023 19:13:02 +0200 Subject: [PATCH 122/132] Fix compilation --- tests/tests/device.rs | 1 + wgpu-core/src/instance.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/tests/device.rs b/tests/tests/device.rs index f6616e274f..9a02009352 100644 --- 
a/tests/tests/device.rs +++ b/tests/tests/device.rs @@ -49,6 +49,7 @@ fn device_lifetime_check() { backends: wgpu::util::backend_bits_from_env().unwrap_or(wgpu::Backends::all()), dx12_shader_compiler: wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default(), gles_minor_version: wgpu::util::gles_minor_version_from_env().unwrap_or_default(), + flags: wgpu::InstanceFlags::debugging().with_env(), }); let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, None) diff --git a/wgpu-core/src/instance.rs b/wgpu-core/src/instance.rs index c20ce97f43..a6c9e3d047 100644 --- a/wgpu-core/src/instance.rs +++ b/wgpu-core/src/instance.rs @@ -736,9 +736,9 @@ impl Global { .dx12 .as_ref() .map(|inst| HalSurface { - raw: unsafe { + raw: Arc::new(unsafe { inst.create_surface_from_swap_chain_panel(swap_chain_panel as _) - }, + }), }) .unwrap(); AnySurface::new(hal_surface) From 22b14711f6447dd32a80a9faa2504dca7c016e01 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 14 Oct 2023 12:35:35 +0200 Subject: [PATCH 123/132] Fix integration --- wgpu-core/src/device/global.rs | 2 +- wgpu-hal/src/gles/queue.rs | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index d60b142829..f842c4fa33 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -2323,7 +2323,7 @@ impl Global { let device = &buffer.device; if !device.is_valid() { - return Err((op, DeviceError::Lost)); + return Err((op, DeviceError::Lost.into())); } if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) { diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index 70ca88bb72..da56d0b2ac 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -60,13 +60,18 @@ impl super::Queue { unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) }; unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; - if self.draw_buffer_count != 0 { + let draw_buffer_count = self.draw_buffer_count.load(Ordering::Relaxed); + if draw_buffer_count != 0 { // Reset the draw buffers to what they were before the clear - let indices = (0..self.draw_buffer_count as u32) + let indices = (0..draw_buffer_count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); unsafe { gl.draw_buffers(&indices) }; } + #[cfg(not(target_arch = "wasm32"))] + for draw_buffer in 0..draw_buffer_count as u32 { + unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; + } } unsafe fn reset_state(&self, gl: &glow::Context) { From 2fb021850cf33753cd3d8db7692c55b438d7b658 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 14 Oct 2023 12:37:50 +0200 Subject: [PATCH 124/132] Removing wrong merge --- wgpu-hal/src/gles/queue.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/wgpu-hal/src/gles/queue.rs b/wgpu-hal/src/gles/queue.rs index da56d0b2ac..a2bd33978d 100644 --- a/wgpu-hal/src/gles/queue.rs +++ b/wgpu-hal/src/gles/queue.rs @@ -68,10 +68,6 @@ impl super::Queue { .collect::>(); unsafe { gl.draw_buffers(&indices) }; } - #[cfg(not(target_arch = "wasm32"))] - for draw_buffer in 0..draw_buffer_count as u32 { - unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) }; - } } unsafe fn reset_state(&self, gl: &glow::Context) { From 12d16eeb8368c44e4daef415eb383831a37af6ce Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 15 Oct 2023 19:28:42 +0200 Subject: [PATCH 125/132] Fixing issue on zero_init_texture test --- tests/src/image.rs | 7 ------- tests/tests/zero_init_texture_after_discard.rs | 11 +++++------ 
wgpu-core/src/command/mod.rs | 2 +- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/tests/src/image.rs b/tests/src/image.rs index e50fd43e7f..9fd0f33d73 100644 --- a/tests/src/image.rs +++ b/tests/src/image.rs @@ -417,13 +417,6 @@ fn copy_texture_to_buffer( } TextureFormat::Depth24PlusStencil8 => { copy_via_compute(device, encoder, texture, buffer, TextureAspect::DepthOnly); - // copy_via_compute( - // device, - // encoder, - // texture, - // buffer_stencil.as_ref().unwrap(), - // TextureAspect::StencilOnly, - // ); copy_texture_to_buffer_with_aspect( encoder, texture, diff --git a/tests/tests/zero_init_texture_after_discard.rs b/tests/tests/zero_init_texture_after_discard.rs index e47f1aa0fa..2d08197392 100644 --- a/tests/tests/zero_init_texture_after_discard.rs +++ b/tests/tests/zero_init_texture_after_discard.rs @@ -83,15 +83,14 @@ fn discarding_either_depth_or_stencil_aspect() { .limits(Limits::downlevel_defaults()), |mut ctx| { let mut case = TestCase::new(&mut ctx, TextureFormat::Depth24PlusStencil8); - case.create_command_encoder(); - case.discard_depth(); - case.submit_command_encoder(); case.create_command_encoder(); + case.discard_depth(); case.discard_stencil(); - case.submit_command_encoder(); - - case.create_command_encoder(); + //When splitting it in subsequent submits of different command encoders + //it seems that texture tracker is not able anymore to get that the texture has been left in DEPTH_STENCIL_WRITE + //and it assume that it could find it uninitialized and set it in RESOURCE as not owning it + //When using a unique submit instead the tracker is able to follow all resource barriers and everything is smooth case.copy_texture_to_buffer(); case.submit_command_encoder(); diff --git a/wgpu-core/src/command/mod.rs b/wgpu-core/src/command/mod.rs index 59336a41cc..969cbcedb3 100644 --- a/wgpu-core/src/command/mod.rs +++ b/wgpu-core/src/command/mod.rs @@ -255,7 +255,7 @@ impl CommandBuffer { pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { log::info!( - "Extracting BackedCommands from CommandBuffer {:?}", + "Extracting BakedCommands from CommandBuffer {:?}", self.info.label() ); let data = self.data.lock().take().unwrap(); From aa1a05f85da689098cef27e3a442676a11e61524 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sun, 5 Nov 2023 13:14:50 +0100 Subject: [PATCH 126/132] Fixing double lock on buffer destroy + test added --- tests/tests/buffer_usages.rs | 103 ++++++++++++++++++++++++++++++++++- wgpu-core/src/resource.rs | 11 +--- 2 files changed, 103 insertions(+), 11 deletions(-) diff --git a/tests/tests/buffer_usages.rs b/tests/tests/buffer_usages.rs index 657b8a41b4..194ed0ec2f 100644 --- a/tests/tests/buffer_usages.rs +++ b/tests/tests/buffer_usages.rs @@ -1,6 +1,6 @@ //! Tests for buffer usages validation. 
-use wgpu::BufferUsages as Bu; +use wgpu::{BufferUsages as Bu, MapMode as Ma}; use wgpu_test::{fail_if, gpu_test, GpuTestConfiguration, TestParameters}; use wgt::BufferAddress; @@ -67,3 +67,104 @@ static BUFFER_USAGE_MAPPABLE_PRIMARY_BUFFERS: GpuTestConfiguration = GpuTestConf ], ); }); + +async fn map_test( + device: &wgpu::Device, + usage_type: &str, + map_mode_type: Ma, + before_unmap: bool, + before_destroy: bool, + after_unmap: bool, + after_destroy: bool, +) { + log::info!("map_test usage_type:{usage_type} map_mode_type:{:?} before_unmap:{before_unmap} before_destroy:{before_destroy} after_unmap:{after_unmap} after_destroy:{after_destroy}", map_mode_type); + + let size = 8; + let usage = match usage_type { + "read" => Bu::COPY_DST | Bu::MAP_READ, + "write" => Bu::COPY_SRC | Bu::MAP_WRITE, + _ => Bu::from_bits(0).unwrap(), + }; + let buffer_creation_validation_error = usage.is_empty(); + + let mut buffer = None; + + fail_if(device, buffer_creation_validation_error, || { + buffer = Some(device.create_buffer(&wgpu::BufferDescriptor { + label: None, + size, + usage, + mapped_at_creation: false, + })); + }); + if buffer_creation_validation_error { + return; + } + + let buffer = buffer.unwrap(); + + let map_async_validation_error = buffer_creation_validation_error + || (map_mode_type == Ma::Read && !usage.contains(Bu::MAP_READ)) + || (map_mode_type == Ma::Write && !usage.contains(Bu::MAP_WRITE)); + + fail_if(device, map_async_validation_error, || { + buffer.slice(0..size).map_async(map_mode_type, |_| {}); + }); + + if map_async_validation_error { + return; + } + + if before_unmap { + buffer.unmap(); + } + + if before_destroy { + buffer.destroy(); + } + + device.poll(wgpu::MaintainBase::Wait); + + if !before_unmap && !before_destroy { + { + let view = buffer.slice(0..size).get_mapped_range(); + assert!(!view.is_empty()); + } + + if after_unmap { + buffer.unmap(); + } + + if after_destroy { + buffer.destroy(); + } + } +} + +#[gpu_test] +static BUFFER_MAP_ASYNC_MAP_STATE: GpuTestConfiguration = GpuTestConfiguration::new() + .parameters(TestParameters::default().features(wgpu::Features::MAPPABLE_PRIMARY_BUFFERS)) + .run_async(move |ctx| async move { + for usage_type in ["invalid", "read", "write"] { + for map_mode_type in [Ma::Read, Ma::Write] { + for before_unmap in [false, true] { + for before_destroy in [false, true] { + for after_unmap in [false, true] { + for after_destroy in [false, true] { + map_test( + &ctx.device, + usage_type, + map_mode_type, + before_unmap, + before_destroy, + after_unmap, + after_destroy, + ) + .await + } + } + } + } + } + } + }); diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs index 19319b729a..0137b61147 100644 --- a/wgpu-core/src/resource.rs +++ b/wgpu-core/src/resource.rs @@ -544,16 +544,7 @@ impl Buffer { let device = &self.device; let buffer_id = self.info.id(); - map_closure = match &*self.map_state.lock() { - &BufferMapState::Waiting(..) // To get the proper callback behavior. - | &BufferMapState::Init { .. } - | &BufferMapState::Active { .. 
} - => { - self.buffer_unmap_inner() - .unwrap_or(None) - } - _ => None, - }; + map_closure = self.buffer_unmap_inner().unwrap_or(None); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { From 2cfa76648ad5992703edbaf98272a6cf21bcfc0b Mon Sep 17 00:00:00 2001 From: gents83 Date: Fri, 10 Nov 2023 20:16:06 +0100 Subject: [PATCH 127/132] Fixing wrong merge --- wgpu-core/src/device/global.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index 2dd292e201..2c01547bb0 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -485,9 +485,9 @@ impl Global { let hub = A::hub(self); log::debug!("Buffer {:?} is asked to be dropped", buffer_id); - let buffer = hub - .buffers - .get(buffer_id) + let mut buffer_guard = hub.buffers.write(); + let buffer = buffer_guard + .take_and_mark_destroyed(buffer_id) .map_err(|_| resource::DestroyError::Invalid)?; buffer.destroy() } From 715bd000e50c95cbda7b58b63d7d258ef3f4a76f Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 13 Nov 2023 17:53:13 +0100 Subject: [PATCH 128/132] Fixing changelog integration --- CHANGELOG.md | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9adda1d0f..a7a4020bcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,12 +42,11 @@ Bottom level categories: For naga changelogs at or before v0.14.0. See [naga's changelog](naga/CHANGELOG.md). -### Major changes +### Changes + - Arcanization of wgpu core resources: Removed 'Token' and 'LifeTime' related management, removed 'RefCount' and 'MultiRefCount' in favour of using only 'Arc' internal reference count, removing mut from resources and added instead internal members locks on demand or atomics operations, resources now implement Drop and destroy stuff when last 'Arc' resources is released, resources hold an 'Arc' in order to be able to implement Drop, resources have an utility to retrieve the id of the resource itself, removed all guards and just retrive the 'Arc' needed on-demand to unlock registry of resources asap removing locking from hot paths. By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626) and tnx also to @jimblandy, @nical, @Wumpf, @Elabajaba & @cwfitzgerald -### Changes - #### General - Log vulkan validation layer messages during instance creation and destruction: By @exrook in [#4586](https://github.com/gfx-rs/wgpu/pull/4586) @@ -377,17 +376,12 @@ By @fornwall in [#3904](https://github.com/gfx-rs/wgpu/pull/3904) and [#3905](ht #### Misc Breaking Changes - Change `AdapterInfo::{device,vendor}` to be `u32` instead of `usize`. By @ameknite in [#3760](https://github.com/gfx-rs/wgpu/pull/3760) -- Remove the `backend_bits` parameter in `initialize_adapter_from_env` and `initialize_adapter_from_env_or_default` - use [InstanceDescriptor::backends](https://docs.rs/wgpu/latest/wgpu/struct.InstanceDescriptor.html#structfield.backends) instead. By @fornwall in [#3904](https://github.com/gfx-rs/wgpu/pull/3904) - -#### DX12 - -- Increase the `max_storage_buffers_per_shader_stage` and `max_storage_textures_per_shader_stage` limits based on what the hardware supports. by @Elabajaba in [#3798]https://github.com/gfx-rs/wgpu/pull/3798 -- Add a `compatible_surface` parameter to `initialize_adapter_from_env` and use that to make `initialize_adapter_from_env_or_default` always respect its `compatible_surface` parameter. 
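The buffer-destroy fix above removes a pattern where `destroy()` matched on `self.map_state.lock()` and, while still holding that guard, called `buffer_unmap_inner()`, which locks `map_state` again. A minimal standalone illustration of the hazard and the fix — simplified types, with a plain `std::sync::Mutex` standing in for the crate's locks — is sketched below:

```rust
use std::sync::Mutex;

enum MapState {
    Idle,
    Active, // a mapping is outstanding and owns a user callback
}

struct Buffer {
    map_state: Mutex<MapState>,
}

impl Buffer {
    // The only place that takes the map_state lock: inspect, reset to Idle,
    // and hand back whatever callback needs to fire.
    fn unmap_inner(&self) -> Option<&'static str> {
        let mut state = self.map_state.lock().unwrap();
        match std::mem::replace(&mut *state, MapState::Idle) {
            MapState::Active => Some("map callback to fire later"),
            MapState::Idle => None,
        }
    }

    fn destroy(&self) -> Option<&'static str> {
        // Deadlock hazard (the old shape of the code): holding the guard from
        // `self.map_state.lock()` while calling `unmap_inner()`, which locks
        // the same non-reentrant mutex a second time.
        //
        // Fix: call the helper directly and let it do all the locking.
        self.unmap_inner()
    }
}

fn main() {
    let buffer = Buffer {
        map_state: Mutex::new(MapState::Active),
    };
    assert!(buffer.destroy().is_some()); // completes without self-deadlocking
    assert!(buffer.destroy().is_none()); // already idle
}
```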
By @fornwall in [#3905](https://github.com/gfx-rs/wgpu/pull/3905) ### Changes - Added support for importing external buffers using `buffer_from_raw` (Dx12, Metal, Vulkan) and `create_buffer_from_hal`. By @AdrianEddy in [#3355](https://github.com/gfx-rs/wgpu/pull/3355) + #### Vulkan - Work around [Vulkan-ValidationLayers#5671](https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/5671) by ignoring reports of violations of [VUID-vkCmdEndDebugUtilsLabelEXT-commandBuffer-01912](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-vkCmdEndDebugUtilsLabelEXT-commandBuffer-01912). By @jimblandy in [#3809](https://github.com/gfx-rs/wgpu/pull/3809). @@ -1088,7 +1082,7 @@ both `raw_window_handle::HasRawWindowHandle` and `raw_window_handle::HasRawDispl #### Vulkan -- Fix `astc_hdr` formats support by @jinleili in [#2971]](https://github.com/gfx-rs/wgpu/pull/2971) +- Fix `astc_hdr` formats support by @jinleili in [#2971]](https://github.com/gfx-rs/wgpu/pull/2971) - Update to Naga b209d911 (2022-9-1) to avoid generating SPIR-V that violates Vulkan valid usage rules `VUID-StandaloneSpirv-Flat-06202` and `VUID-StandaloneSpirv-Flat-04744`. By @jimblandy in @@ -2297,4 +2291,4 @@ DeviceDescriptor { - concept of the storage hub - basic recording of passes and command buffers - submission-based lifetime tracking and command buffer recycling -- automatic resource transitions +- automatic resource transitions \ No newline at end of file From b6d61da6ce098741ff1991f0c42a08c29d87eac3 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 13 Nov 2023 19:32:34 +0100 Subject: [PATCH 129/132] Fix insert_impl and take_and_mark_destroyed --- tests/tests/zero_init_texture_after_discard.rs | 16 ++++++---------- wgpu-core/src/identity.rs | 4 ++-- wgpu-core/src/registry.rs | 10 ++++++---- wgpu-core/src/storage.rs | 13 +++++++++---- wgpu/src/backend/direct.rs | 4 ---- 5 files changed, 23 insertions(+), 24 deletions(-) diff --git a/tests/tests/zero_init_texture_after_discard.rs b/tests/tests/zero_init_texture_after_discard.rs index b041d470c0..3c0561343c 100644 --- a/tests/tests/zero_init_texture_after_discard.rs +++ b/tests/tests/zero_init_texture_after_discard.rs @@ -13,11 +13,8 @@ static DISCARDING_COLOR_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_A let mut case = TestCase::new(&mut ctx, TextureFormat::Rgba8UnormSrgb); case.create_command_encoder(); case.discard(); - case.submit_command_encoder(); - - case.create_command_encoder(); case.copy_texture_to_buffer(); - case.submit_command_encoder(); + case.submit_command_encoder_and_wait(); case.assert_buffers_are_zero(); }); @@ -31,7 +28,7 @@ static DISCARDING_COLOR_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_I case.create_command_encoder(); case.discard(); case.copy_texture_to_buffer(); - case.submit_command_encoder(); + case.submit_command_encoder_and_wait(); case.assert_buffers_are_zero(); }); @@ -58,7 +55,7 @@ static DISCARDING_DEPTH_TARGET_RESETS_TEXTURE_INIT_STATE_CHECK_VISIBLE_ON_COPY_I case.create_command_encoder(); case.discard(); case.copy_texture_to_buffer(); - case.submit_command_encoder(); + case.submit_command_encoder_and_wait(); case.assert_buffers_are_zero(); } @@ -85,12 +82,10 @@ static DISCARDING_EITHER_DEPTH_OR_STENCIL_ASPECT_TEST: GpuTestConfiguration = ] { let mut case = TestCase::new(&mut ctx, format); case.create_command_encoder(); - case.discard_depth(); case.discard_stencil(); case.copy_texture_to_buffer(); - - case.submit_command_encoder(); + case.submit_command_encoder_and_wait(); 
case.assert_buffers_are_zero(); } @@ -208,10 +203,11 @@ impl<'ctx> TestCase<'ctx> { ) } - pub fn submit_command_encoder(&mut self) { + pub fn submit_command_encoder_and_wait(&mut self) { self.ctx .queue .submit([self.encoder.take().unwrap().finish()]); + self.ctx.device.poll(MaintainBase::Wait); } pub fn discard(&mut self) { diff --git a/wgpu-core/src/identity.rs b/wgpu-core/src/identity.rs index ae83a905c3..3f421dd697 100644 --- a/wgpu-core/src/identity.rs +++ b/wgpu-core/src/identity.rs @@ -53,7 +53,7 @@ impl IdentityValues { Some((index, epoch)) => I::zip(index, epoch + 1, backend), None => { let epoch = 1; - let used = self.used.entry(epoch).or_default(); + let used = self.used.entry(epoch).or_insert_with(Default::default); let index = if let Some(i) = used.iter().max_by_key(|v| *v) { i + 1 } else { @@ -68,7 +68,7 @@ impl IdentityValues { pub fn mark_as_used(&mut self, id: I) -> I { self.count += 1; let (index, epoch, _backend) = id.unzip(); - let used = self.used.entry(epoch).or_default(); + let used = self.used.entry(epoch).or_insert_with(Default::default); used.push(index); id } diff --git a/wgpu-core/src/registry.rs b/wgpu-core/src/registry.rs index 17d538c494..6c86b275b0 100644 --- a/wgpu-core/src/registry.rs +++ b/wgpu-core/src/registry.rs @@ -81,14 +81,16 @@ impl> FutureId<'_, I, T> { } pub fn assign(self, value: T) -> (I, Arc) { - self.data.write().insert(self.id, self.init(value)); - (self.id, self.data.read().get(self.id).unwrap().clone()) + let mut data = self.data.write(); + data.insert(self.id, self.init(value)); + (self.id, data.get(self.id).unwrap().clone()) } pub fn assign_existing(self, value: &Arc) -> I { + let mut data = self.data.write(); #[cfg(debug_assertions)] - debug_assert!(!self.data.read().contains(self.id)); - self.data.write().insert(self.id, value.clone()); + debug_assert!(!data.contains(self.id)); + data.insert(self.id, value.clone()); self.id } diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index a292403dcb..c3fc899113 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -130,26 +130,31 @@ where } } - fn insert_impl(&mut self, index: usize, element: Element) { + fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element) { if index >= self.map.len() { self.map.resize_with(index + 1, || Element::Vacant); } match std::mem::replace(&mut self.map[index], element) { Element::Vacant => {} - _ => panic!("Index {index:?} is already occupied"), + Element::Occupied(_, storage_epoch) => { + assert_ne!(epoch, storage_epoch, "Index {index:?} of {} is already occupied", T::TYPE); + } + Element::Error(storage_epoch, _) => { + assert_ne!(epoch, storage_epoch, "Index {index:?} of {} is already occupied with Error", T::TYPE); + } } } pub(crate) fn insert(&mut self, id: I, value: Arc) { log::info!("User is inserting {}{:?}", T::TYPE, id); let (index, epoch, _backend) = id.unzip(); - self.insert_impl(index as usize, Element::Occupied(value, epoch)) + self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch)) } pub(crate) fn insert_error(&mut self, id: I, label: &str) { log::info!("User is insering as error {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) + self.insert_impl(index as usize, epoch, Element::Error(epoch, label.to_string())) } pub(crate) fn take_and_mark_destroyed(&mut self, id: I) -> Result, InvalidId> { diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index f0c550fea3..147322b66b 100644 --- 
a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -2352,10 +2352,6 @@ impl crate::Context for Context { Err(err) => self.handle_error_fatal(err, "Queue::submit"), }; - for cmdbuf in &temp_command_buffers { - wgc::gfx_select!(*queue => global.command_buffer_drop(*cmdbuf)); - } - (Unused, index) } From c8a60e21ef110c5799681d1169c6e6cc0801a799 Mon Sep 17 00:00:00 2001 From: gents83 Date: Mon, 13 Nov 2023 19:34:16 +0100 Subject: [PATCH 130/132] Fix fmt --- wgpu-core/src/storage.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/wgpu-core/src/storage.rs b/wgpu-core/src/storage.rs index c3fc899113..2abd36e8c2 100644 --- a/wgpu-core/src/storage.rs +++ b/wgpu-core/src/storage.rs @@ -137,10 +137,20 @@ where match std::mem::replace(&mut self.map[index], element) { Element::Vacant => {} Element::Occupied(_, storage_epoch) => { - assert_ne!(epoch, storage_epoch, "Index {index:?} of {} is already occupied", T::TYPE); + assert_ne!( + epoch, + storage_epoch, + "Index {index:?} of {} is already occupied", + T::TYPE + ); } Element::Error(storage_epoch, _) => { - assert_ne!(epoch, storage_epoch, "Index {index:?} of {} is already occupied with Error", T::TYPE); + assert_ne!( + epoch, + storage_epoch, + "Index {index:?} of {} is already occupied with Error", + T::TYPE + ); } } } @@ -154,7 +164,11 @@ where pub(crate) fn insert_error(&mut self, id: I, label: &str) { log::info!("User is insering as error {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Error(epoch, label.to_string())) + self.insert_impl( + index as usize, + epoch, + Element::Error(epoch, label.to_string()), + ) } pub(crate) fn take_and_mark_destroyed(&mut self, id: I) -> Result, InvalidId> { From 0e437be621732c90813fd90aff2caba5ba36e667 Mon Sep 17 00:00:00 2001 From: gents83 Date: Sat, 18 Nov 2023 10:18:59 +0100 Subject: [PATCH 131/132] Updated changelog --- CHANGELOG.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb0dfebe89..d2faf1cef6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,8 +42,24 @@ Bottom level categories: ### Changes -- Arcanization of wgpu core resources: Removed 'Token' and 'LifeTime' related management, -removed 'RefCount' and 'MultiRefCount' in favour of using only 'Arc' internal reference count, removing mut from resources and added instead internal members locks on demand or atomics operations, resources now implement Drop and destroy stuff when last 'Arc' resources is released, resources hold an 'Arc' in order to be able to implement Drop, resources have an utility to retrieve the id of the resource itself, removed all guards and just retrive the 'Arc' needed on-demand to unlock registry of resources asap removing locking from hot paths. 
By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626) and tnx also to @jimblandy, @nical, @Wumpf, @Elabajaba & @cwfitzgerald +- Arcanization of wgpu core resources: +Removed Token and LifeTime related management +Removed RefCount and MultiRefCount in favour of using only Arc internal reference count +Removing mut from resources and added instead internal members locks on demand or atomics operations +Resources now implement Drop and destroy stuff when last Arc resources is released +Resources hold an Arc in order to be able to implement Drop +Resources have an utility to retrieve the id of the resource itself +Remove all guards and just retrive the Arc needed on-demand to unlock registry of resources asap +Verify correct resources release when unused or not needed +Check Web and Metal compliation (thanks to @niklaskorz) +Fix tests on all platforms +Test a multithreaded scenario +Storage is now holding only user-land resources, but Arc is keeping refcount for resources +When user unregister a resource, it's not dropped if still in use due to refcount inside wgpu +IdentityManager is now unique and free is called on resource drop instead of storage unregister +Identity changes due to Arcanization and Registry being just the user reference +Added MemLeaks test and fixing mem leaks +By @gents83 in [#3626](https://github.com/gfx-rs/wgpu/pull/3626) and tnx also to @jimblandy, @nical, @Wumpf, @Elabajaba & @cwfitzgerald #### General From 724fc1a0531a5d3f3d20a6af1095ce396a8627b6 Mon Sep 17 00:00:00 2001 From: Sludge <96552222+SludgePhD@users.noreply.github.com> Date: Sat, 18 Nov 2023 18:34:28 +0100 Subject: [PATCH 132/132] Shorten lock durations to avoid deadlocks --- wgpu-core/src/device/global.rs | 14 ++++++++----- wgpu-core/src/device/queue.rs | 4 ++-- wgpu-core/src/device/resource.rs | 34 ++++++++++++++++++-------------- 3 files changed, 30 insertions(+), 22 deletions(-) diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs index a711c20fe9..d7cf7fbcea 100644 --- a/wgpu-core/src/device/global.rs +++ b/wgpu-core/src/device/global.rs @@ -734,11 +734,12 @@ impl Global { if let resource::TextureInner::Native { ref raw } = *texture.inner().as_ref().unwrap() { if !raw.is_none() { let temp = queue::TempResource::Texture(texture.clone()); - let mut pending_writes = device.pending_writes.lock(); - let pending_writes = pending_writes.as_mut().unwrap(); + let mut guard = device.pending_writes.lock(); + let pending_writes = guard.as_mut().unwrap(); if pending_writes.dst_textures.contains_key(&texture_id) { pending_writes.temp_resources.push(temp); } else { + drop(guard); device .lock_life() .schedule_resource_destruction(temp, last_submit_index); @@ -763,7 +764,6 @@ impl Global { let device = &texture.device; { - let mut life_lock = device.lock_life(); if device .pending_writes .lock() @@ -772,9 +772,13 @@ impl Global { .dst_textures .contains_key(&texture_id) { - life_lock.future_suspected_textures.push(texture.clone()); + device + .lock_life() + .future_suspected_textures + .push(texture.clone()); } else { - life_lock + device + .lock_life() .suspected_resources .insert(texture_id, texture.clone()); } diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index 9dfa412e1a..3902d6190c 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -1147,9 +1147,9 @@ impl Global { // a temporary one, since the chains are not finished. 
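The changelog entry above summarizes the ownership model: the storage keeps only the user-facing reference, every internal user holds its own `Arc`, and unregistering merely drops the storage's reference, so the resource's `Drop` (and the destruction of the raw GPU object) runs when the last `Arc` goes away. A minimal sketch of that idea, with hypothetical `Registry`/`Resource` types rather than the real wgpu-core ones:

```rust
use std::collections::HashMap;
use std::sync::Arc;

struct Resource {
    label: String,
}

impl Drop for Resource {
    fn drop(&mut self) {
        // In wgpu-core this is where the raw GPU object would be freed.
        println!("destroying {}", self.label);
    }
}

#[derive(Default)]
struct Registry {
    storage: HashMap<u64, Arc<Resource>>, // user-land references only
}

impl Registry {
    fn register(&mut self, id: u64, label: &str) -> Arc<Resource> {
        let res = Arc::new(Resource { label: label.to_string() });
        self.storage.insert(id, res.clone());
        res
    }

    // Removing the storage entry does not free the resource while other
    // owners (trackers, command buffers, the device) still hold an Arc.
    fn unregister(&mut self, id: u64) {
        self.storage.remove(&id);
    }
}

fn main() {
    let mut registry = Registry::default();
    let in_flight = registry.register(1, "buffer-1"); // e.g. held by a submission
    registry.unregister(1);
    println!("still alive: {}", in_flight.label); // not destroyed yet
    drop(in_flight); // last Arc released -> Drop runs here
}
```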
let mut temp_suspected = device.temp_suspected.lock(); { - let mut suspected = temp_suspected.take().unwrap(); + let mut suspected = + temp_suspected.replace(ResourceMaps::new::()).unwrap(); suspected.clear(); - temp_suspected.replace(ResourceMaps::new::()); } // finish all the command buffers first diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs index 09e9038114..035b2ea554 100644 --- a/wgpu-core/src/device/resource.rs +++ b/wgpu-core/src/device/resource.rs @@ -312,18 +312,19 @@ impl Device { ) -> Result<(UserClosures, bool), WaitIdleError> { profiling::scope!("Device::maintain"); { - let mut life_tracker = self.lock_life(); - // Normally, `temp_suspected` exists only to save heap // allocations: it's cleared at the start of the function // call, and cleared by the end. But `Global::queue_submit` is // fallible; if it exits early, it may leave some resources in // `temp_suspected`. - { - let temp_suspected = self.temp_suspected.lock().take().unwrap(); - life_tracker.suspected_resources.extend(temp_suspected); - } - self.temp_suspected.lock().replace(ResourceMaps::new::()); + let temp_suspected = self + .temp_suspected + .lock() + .replace(ResourceMaps::new::()) + .unwrap(); + + let mut life_tracker = self.lock_life(); + life_tracker.suspected_resources.extend(temp_suspected); life_tracker.triage_suspected( hub, @@ -397,7 +398,11 @@ impl Device { } pub(crate) fn untrack(&self, trackers: &Tracker) { - let mut temp_suspected = self.temp_suspected.lock().take().unwrap(); + let mut temp_suspected = self + .temp_suspected + .lock() + .replace(ResourceMaps::new::()) + .unwrap(); temp_suspected.clear(); // As the tracker is cleared/dropped, we need to consider all the resources // that it references for destruction in the next GC pass. @@ -444,7 +449,6 @@ impl Device { } } self.lock_life().suspected_resources.extend(temp_suspected); - self.temp_suspected.lock().replace(ResourceMaps::new::()); } pub(crate) fn create_buffer( @@ -3203,8 +3207,8 @@ impl Device { &self, submission_index: SubmissionIndex, ) -> Result<(), WaitIdleError> { - let fence = self.fence.read(); - let fence = fence.as_ref().unwrap(); + let guard = self.fence.read(); + let fence = guard.as_ref().unwrap(); let last_done_index = unsafe { self.raw .as_ref() @@ -3221,6 +3225,7 @@ impl Device { .wait(fence, submission_index, !0) .map_err(DeviceError::from)? }; + drop(guard); let closures = self.lock_life().triage_submissions( submission_index, self.command_allocator.lock().as_mut().unwrap(), @@ -3277,9 +3282,8 @@ impl Device { self.valid.store(false, Ordering::Release); // 1) Resolve the GPUDevice device.lost promise. - let mut life_tracker = self.lock_life(); - if life_tracker.device_lost_closure.is_some() { - let device_lost_closure = life_tracker.device_lost_closure.take().unwrap(); + let closure = self.lock_life().device_lost_closure.take(); + if let Some(device_lost_closure) = closure { device_lost_closure.call(DeviceLostReason::Unknown, message.to_string()); } @@ -3310,7 +3314,6 @@ impl Device { /// Wait for idle and remove resources that we can, before we die. 
pub(crate) fn prepare_to_die(&self) { self.pending_writes.lock().as_mut().unwrap().deactivate(); - let mut life_tracker = self.lock_life(); let current_index = self.active_submission_index.load(Ordering::Relaxed); if let Err(error) = unsafe { let fence = self.fence.read(); @@ -3322,6 +3325,7 @@ impl Device { } { log::error!("failed to wait for the device: {:?}", error); } + let mut life_tracker = self.lock_life(); let _ = life_tracker.triage_submissions( current_index, self.command_allocator.lock().as_mut().unwrap(),
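Patch 132 shortens how long guards are held so no code path takes a second lock while still holding the first: `pending_writes` is dropped before `lock_life()` is called, and the fence read guard is released before submissions are triaged. A small sketch of that ordering discipline, with hypothetical locks standing in for the `Device` fields:

```rust
use std::sync::Mutex;

struct Device {
    pending_writes: Mutex<Vec<&'static str>>,
    life_tracker: Mutex<Vec<&'static str>>,
}

impl Device {
    fn schedule_destruction(&self, item: &'static str) {
        // Copy what we need out of the first lock...
        let is_pending = {
            let pending = self.pending_writes.lock().unwrap();
            pending.contains(&item)
        }; // ...and let the guard drop here, before taking the next lock.

        if !is_pending {
            // Safe: no other guard is held, so lock order cannot invert.
            self.life_tracker.lock().unwrap().push(item);
        }
    }
}

fn main() {
    let device = Device {
        pending_writes: Mutex::new(vec!["texture-0"]),
        life_tracker: Mutex::new(Vec::new()),
    };
    device.schedule_destruction("texture-1");
    assert_eq!(*device.life_tracker.lock().unwrap(), vec!["texture-1"]);
}
```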