From 250afdc9ab1c3a0262e748438706a908f61130d1 Mon Sep 17 00:00:00 2001
From: Nicolas Silva
Date: Mon, 20 Nov 2023 19:59:30 +0000
Subject: [PATCH] Bug 1865585 - Update `wgpu` to revision 6e21f7a9291db4395192d6b510d906978ae2d251. r=webgpu-reviewers,supply-chain-reviewers,ErichDonGubler,teoxoy

Note: This revision contains the arcanization work (#3626 in the changelog
below); a simplified sketch of that pattern follows the changelog.

# Changelog

* #4702 Add `WasmNotSendSync` By daxpedda in https://github.com/gfx-rs/wgpu/pull/4702
* #4707 Add more metal keywords By fornwall in https://github.com/gfx-rs/wgpu/pull/4707
* #4706 [naga] remove `span` and `validate` features By teoxoy in https://github.com/gfx-rs/wgpu/pull/4706
* #4709 [dx12] filter out haswell iGPUs By teoxoy in https://github.com/gfx-rs/wgpu/pull/4709
* #4712 Fix typo in pull request template. By jimblandy in https://github.com/gfx-rs/wgpu/pull/4712
* #4598 Add more lints By daxpedda in https://github.com/gfx-rs/wgpu/pull/4598
* #4713 [naga wgsl-in] Include base when printing pointer and array types. By jimblandy in https://github.com/gfx-rs/wgpu/pull/4713
* #4718 [vk] check that adapters are Vulkan compliant By teoxoy in https://github.com/gfx-rs/wgpu/pull/4718
* #4719 [naga] Let constant evaluation of `As` preserve `Splat` expressions. By jimblandy in https://github.com/gfx-rs/wgpu/pull/4719
* #4725 Corrects typo in examples FrameCounter By cantudo in https://github.com/gfx-rs/wgpu/pull/4725
* #3626 Arcanization of wgpu core resources By gents83 in https://github.com/gfx-rs/wgpu/pull/3626
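Editorial aside on the arcanization note above: wgpu-core moves from resources
owned by ID-indexed storages (borrowed from the hub while in use) toward
reference-counted resources that can be cloned out of a registry and outlive
their IDs. The following is a minimal, self-contained Rust sketch of that
ownership pattern only; the `Registry`/`Buffer` names and methods are
illustrative stand-ins, not the actual wgpu-core API.

use std::sync::{Arc, Mutex};

// Illustrative stand-in for a wgpu-core resource.
struct Buffer {
    label: String,
}

// Illustrative stand-in for an arcanized registry: slots hold Arc'd resources.
#[derive(Default)]
struct Registry {
    storage: Mutex<Vec<Option<Arc<Buffer>>>>,
}

impl Registry {
    // Register a resource and hand back an index (wgpu-core uses typed ids).
    fn register(&self, buffer: Buffer) -> usize {
        let mut storage = self.storage.lock().unwrap();
        storage.push(Some(Arc::new(buffer)));
        storage.len() - 1
    }

    // Cloning the Arc lets callers keep using the resource after the lock is
    // released, instead of holding a borrow of the whole storage.
    fn get(&self, id: usize) -> Option<Arc<Buffer>> {
        self.storage.lock().unwrap().get(id)?.clone()
    }

    // Dropping the registry's reference does not invalidate in-flight users;
    // the resource dies when the last Arc clone is dropped.
    fn unregister(&self, id: usize) {
        if let Some(slot) = self.storage.lock().unwrap().get_mut(id) {
            *slot = None;
        }
    }
}

fn main() {
    let registry = Registry::default();
    let id = registry.register(Buffer { label: "vertices".into() });
    let in_flight = registry.get(id).expect("buffer was just registered");
    registry.unregister(id);
    // A command still in flight keeps the buffer alive.
    println!("still alive: {}", in_flight.label);
}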
Differential Revision: https://phabricator.services.mozilla.com/D194048
---
 .cargo/config.in | 4 +-
 Cargo.lock | 10 +-
 gfx/wgpu_bindings/Cargo.toml | 14 +-
 gfx/wgpu_bindings/moz.yaml | 4 +-
 supply-chain/audits.toml | 25 +
 third_party/rust/naga/.cargo-checksum.json | 2 +-
 third_party/rust/naga/Cargo.toml | 9 -
 third_party/rust/naga/benches/criterion.rs | 5 -
 third_party/rust/naga/src/arena.rs | 86 +-
 .../rust/naga/src/back/msl/keywords.rs | 295 ++-
 third_party/rust/naga/src/back/msl/writer.rs | 2 +-
 third_party/rust/naga/src/block.rs | 23 +-
 .../rust/naga/src/front/wgsl/to_wgsl.rs | 6 +-
 .../rust/naga/src/proc/constant_evaluator.rs | 26 +-
 third_party/rust/naga/src/proc/emitter.rs | 1 -
 third_party/rust/naga/src/span.rs | 28 +-
 third_party/rust/naga/src/valid/analyzer.rs | 4 +-
 third_party/rust/naga/src/valid/compose.rs | 2 -
 third_party/rust/naga/src/valid/expression.rs | 5 -
 third_party/rust/naga/src/valid/function.rs | 22 +-
 third_party/rust/naga/src/valid/handles.rs | 10 -
 third_party/rust/naga/src/valid/interface.rs | 26 +-
 third_party/rust/naga/src/valid/mod.rs | 18 +-
 .../rust/wgpu-core/.cargo-checksum.json | 2 +-
 third_party/rust/wgpu-core/Cargo.toml | 6 +-
 third_party/rust/wgpu-core/src/any_surface.rs | 112 +
 .../rust/wgpu-core/src/binding_model.rs | 195 +-
 .../rust/wgpu-core/src/command/bind.rs | 201 +-
 .../rust/wgpu-core/src/command/bundle.rs | 290 ++-
 .../rust/wgpu-core/src/command/clear.rs | 132 +-
 .../rust/wgpu-core/src/command/compute.rs | 263 +--
 .../rust/wgpu-core/src/command/memory_init.rs | 131 +-
 third_party/rust/wgpu-core/src/command/mod.rs | 289 +--
 .../rust/wgpu-core/src/command/query.rs | 95 +-
 .../rust/wgpu-core/src/command/render.rs | 528 ++---
 .../rust/wgpu-core/src/command/transfer.rs | 372 ++--
 third_party/rust/wgpu-core/src/conv.rs | 30 +-
 .../rust/wgpu-core/src/device/any_device.rs | 88 +
 .../rust/wgpu-core/src/device/global.rs | 1935 ++++++-----------
 third_party/rust/wgpu-core/src/device/life.rs | 1228 ++++++-----
 third_party/rust/wgpu-core/src/device/mod.rs | 48 +-
 .../rust/wgpu-core/src/device/queue.rs | 946 ++++----
 .../rust/wgpu-core/src/device/resource.rs | 1183 +++++-----
 third_party/rust/wgpu-core/src/error.rs | 2 +-
 third_party/rust/wgpu-core/src/global.rs | 79 +-
 third_party/rust/wgpu-core/src/hal_api.rs | 47 +-
 third_party/rust/wgpu-core/src/hub.rs | 621 +-----
 third_party/rust/wgpu-core/src/id.rs | 85 +-
 third_party/rust/wgpu-core/src/identity.rs | 172 +-
 .../rust/wgpu-core/src/init_tracker/buffer.rs | 28 +-
 .../wgpu-core/src/init_tracker/texture.rs | 16 +-
 third_party/rust/wgpu-core/src/instance.rs | 636 +++---
 third_party/rust/wgpu-core/src/lib.rs | 158 +-
 third_party/rust/wgpu-core/src/pipeline.rs | 144 +-
 third_party/rust/wgpu-core/src/present.rs | 264 +--
 third_party/rust/wgpu-core/src/registry.rs | 263 ++-
 third_party/rust/wgpu-core/src/resource.rs | 717 ++++--
 third_party/rust/wgpu-core/src/storage.rs | 236 +-
 .../rust/wgpu-core/src/track/buffer.rs | 293 +--
 .../rust/wgpu-core/src/track/metadata.rs | 135 +-
 third_party/rust/wgpu-core/src/track/mod.rs | 147 +-
 .../rust/wgpu-core/src/track/stateless.rs | 179 +-
 .../rust/wgpu-core/src/track/texture.rs | 503 ++---
 .../rust/wgpu-hal/.cargo-checksum.json | 2 +-
 .../rust/wgpu-hal/examples/halmark/main.rs | 9 +-
 .../rust/wgpu-hal/examples/raw-gles.rs | 2 +-
 .../rust/wgpu-hal/src/auxil/dxgi/factory.rs | 36 +-
 third_party/rust/wgpu-hal/src/dx11/device.rs | 6 +-
 third_party/rust/wgpu-hal/src/dx11/library.rs | 4 +-
 third_party/rust/wgpu-hal/src/dx11/mod.rs | 10 +-
 third_party/rust/wgpu-hal/src/dx12/adapter.rs | 3 +-
 third_party/rust/wgpu-hal/src/dx12/device.rs | 2 +-
 .../rust/wgpu-hal/src/dx12/instance.rs | 5 +-
 third_party/rust/wgpu-hal/src/dx12/mod.rs | 59 +-
 third_party/rust/wgpu-hal/src/empty.rs | 14 +-
 third_party/rust/wgpu-hal/src/gles/adapter.rs | 19 +-
 third_party/rust/wgpu-hal/src/gles/device.rs | 36 +-
 third_party/rust/wgpu-hal/src/gles/egl.rs | 63 +-
 third_party/rust/wgpu-hal/src/gles/mod.rs | 15 +-
 third_party/rust/wgpu-hal/src/gles/queue.rs | 42 +-
 third_party/rust/wgpu-hal/src/gles/web.rs | 191 +-
 third_party/rust/wgpu-hal/src/gles/wgl.rs | 26 +-
 third_party/rust/wgpu-hal/src/lib.rs | 56 +-
 third_party/rust/wgpu-hal/src/metal/mod.rs | 14 +-
 .../rust/wgpu-hal/src/metal/surface.rs | 30 +-
 .../rust/wgpu-hal/src/vulkan/adapter.rs | 17 +-
 .../rust/wgpu-hal/src/vulkan/device.rs | 2 +-
 .../rust/wgpu-hal/src/vulkan/instance.rs | 30 +-
 third_party/rust/wgpu-hal/src/vulkan/mod.rs | 52 +-
 .../rust/wgpu-types/.cargo-checksum.json | 2 +-
 third_party/rust/wgpu-types/src/lib.rs | 2 +
 91 files changed, 7024 insertions(+), 7151 deletions(-)
 create mode 100644 third_party/rust/wgpu-core/src/any_surface.rs
 create mode 100644 third_party/rust/wgpu-core/src/device/any_device.rs

diff --git a/.cargo/config.in b/.cargo/config.in
index 5fb4a3c9c2ac4..715c7802acf61 100644
--- a/.cargo/config.in
+++ b/.cargo/config.in
@@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
 rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
 replace-with = "vendored-sources"
 
-[source."git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d"]
+[source."git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251"]
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 replace-with = "vendored-sources"
 
 [source."git+https://github.com/glandium/warp?rev=4af45fae95bc98b0eba1ef0db17e1dac471bb23d"]
diff --git a/Cargo.lock b/Cargo.lock
index 678c5d84673fd..f53420c126e23 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1146,7 +1146,7 @@ dependencies = [
 [[package]]
 name = "d3d12"
 version = "0.7.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d#3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+source = "git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251#6e21f7a9291db4395192d6b510d906978ae2d251"
 dependencies = [
  "bitflags 2.4.0",
  "libloading",
@@ -3761,7 +3761,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
 [[package]]
 name = "naga"
 version = "0.14.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d#3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+source = "git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251#6e21f7a9291db4395192d6b510d906978ae2d251"
 dependencies = [
  "bit-set",
  "bitflags 2.4.0",
@@ -6362,7 +6362,7 @@ dependencies = [
 [[package]]
 name = "wgpu-core"
 version = "0.18.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d#3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+source = "git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251#6e21f7a9291db4395192d6b510d906978ae2d251"
 dependencies = [
  "arrayvec",
  "bit-vec",
@@ -6385,7 +6385,7 @@ dependencies = [
 [[package]]
 name = "wgpu-hal"
 version = "0.18.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d#3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+source = "git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251#6e21f7a9291db4395192d6b510d906978ae2d251"
 dependencies = [
  "android_system_properties",
  "arrayvec",
@@ -6422,7 +6422,7 @@ dependencies = [
 [[package]]
 name = "wgpu-types"
 version = "0.18.0"
-source = "git+https://github.com/gfx-rs/wgpu?rev=3ec547cdcaaa14488327d8f1b5f7736278c4178d#3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+source = "git+https://github.com/gfx-rs/wgpu?rev=6e21f7a9291db4395192d6b510d906978ae2d251#6e21f7a9291db4395192d6b510d906978ae2d251"
 dependencies = [
  "bitflags 2.4.0",
  "js-sys",
diff --git a/gfx/wgpu_bindings/Cargo.toml b/gfx/wgpu_bindings/Cargo.toml
index 94075dbf122cf..8b4f64b9428b6 100644
--- a/gfx/wgpu_bindings/Cargo.toml
+++ b/gfx/wgpu_bindings/Cargo.toml
@@ -17,7 +17,7 @@ default = []
 [dependencies.wgc]
 package = "wgpu-core"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 #Note: "replay" shouldn't ideally be needed,
 # but it allows us to serialize everything across IPC.
 features = ["replay", "trace", "serial-pass", "strict_asserts", "wgsl"]
@@ -27,36 +27,36 @@ features = ["replay", "trace", "serial-pass", "strict_asserts", "wgsl"]
 [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc]
 package = "wgpu-core"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 features = ["metal"]
 
 # We want the wgpu-core Direct3D backends on Windows.
 [target.'cfg(windows)'.dependencies.wgc]
 package = "wgpu-core"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 features = ["dx11", "dx12"]
 
 # We want the wgpu-core Vulkan backend on Linux and Windows.
 [target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc]
 package = "wgpu-core"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 features = ["vulkan"]
 
 [dependencies.wgt]
 package = "wgpu-types"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 
 [dependencies.wgh]
 package = "wgpu-hal"
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 
 [target.'cfg(windows)'.dependencies.d3d12]
 git = "https://github.com/gfx-rs/wgpu"
-rev = "3ec547cdcaaa14488327d8f1b5f7736278c4178d"
+rev = "6e21f7a9291db4395192d6b510d906978ae2d251"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.3"
diff --git a/gfx/wgpu_bindings/moz.yaml b/gfx/wgpu_bindings/moz.yaml
index 1821521918915..2f3e66102bc7b 100644
--- a/gfx/wgpu_bindings/moz.yaml
+++ b/gfx/wgpu_bindings/moz.yaml
@@ -20,11 +20,11 @@ origin:
 
   # Human-readable identifier for this version/release
   # Generally "version NNN", "tag SSS", "bookmark SSS"
-  release: commit 3ec547cdcaaa14488327d8f1b5f7736278c4178d
+  release: commit 6e21f7a9291db4395192d6b510d906978ae2d251
 
   # Revision to pull in
   # Must be a long or short commit SHA (long preferred)
-  revision: 3ec547cdcaaa14488327d8f1b5f7736278c4178d
+  revision: 6e21f7a9291db4395192d6b510d906978ae2d251
 
 license: ['MIT', 'Apache-2.0']
diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml
index d8c97773fb98f..368848349bbe7 100644
--- a/supply-chain/audits.toml
+++ b/supply-chain/audits.toml
@@ -1166,6 +1166,11 @@ who = "Nicolas Silva "
 criteria = "safe-to-deploy"
 delta = "0.7.0 -> 0.7.0@git:ba3d6898f18c25bb5a2b8ba18790134b97758e83"
 
+[[audits.d3d12]]
+who = "Nicolas Silva "
+criteria = "safe-to-deploy"
+delta = "0.7.0@git:3ec547cdcaaa14488327d8f1b5f7736278c4178d -> 0.7.0@git:6e21f7a9291db4395192d6b510d906978ae2d251"
+
 [[audits.d3d12]]
 who = "Teodor Tanasoaia "
 criteria = "safe-to-deploy"
@@ -2359,6 +2364,11 @@ who = "Nicolas Silva "
 criteria = "safe-to-deploy"
 delta = "0.14.0@git:34e947de4b3e0b0d6b0e2f40cede926467ea9f1e -> 0.14.0@git:ba3d6898f18c25bb5a2b8ba18790134b97758e83"
 
+[[audits.naga]]
+who = "Nicolas Silva "
+criteria = "safe-to-deploy"
+delta = "0.14.0@git:3ec547cdcaaa14488327d8f1b5f7736278c4178d -> 0.14.0@git:6e21f7a9291db4395192d6b510d906978ae2d251"
+
 [[audits.naga]]
 who = "Teodor Tanasoaia "
 criteria = "safe-to-deploy"
@@ -4087,6 +4097,11 @@ who = "Nicolas Silva "
 criteria = "safe-to-deploy"
 delta = "0.18.0@git:34e947de4b3e0b0d6b0e2f40cede926467ea9f1e -> 0.18.0@git:ba3d6898f18c25bb5a2b8ba18790134b97758e83"
 
+[[audits.wgpu-core]]
+who = "Nicolas Silva "
+criteria = "safe-to-deploy"
+delta = "0.18.0@git:3ec547cdcaaa14488327d8f1b5f7736278c4178d -> 0.18.0@git:6e21f7a9291db4395192d6b510d906978ae2d251"
+
 [[audits.wgpu-core]]
 who = "Teodor Tanasoaia "
 criteria = "safe-to-deploy"
@@ -4145,6 +4160,11 @@ who = "Nicolas Silva "
 criteria = "safe-to-deploy"
 delta = "0.18.0@git:34e947de4b3e0b0d6b0e2f40cede926467ea9f1e -> 0.18.0@git:ba3d6898f18c25bb5a2b8ba18790134b97758e83"
 
+[[audits.wgpu-hal]]
+who = "Nicolas Silva "
+criteria = "safe-to-deploy"
+delta = "0.18.0@git:3ec547cdcaaa14488327d8f1b5f7736278c4178d -> 0.18.0@git:6e21f7a9291db4395192d6b510d906978ae2d251"
+
 [[audits.wgpu-hal]]
 who = "Teodor Tanasoaia "
 criteria = "safe-to-deploy"
@@ -4203,6 +4223,11 @@ who = "Nicolas Silva "
 criteria = "safe-to-deploy"
 delta = "0.18.0@git:34e947de4b3e0b0d6b0e2f40cede926467ea9f1e -> 0.18.0@git:ba3d6898f18c25bb5a2b8ba18790134b97758e83"
 
+[[audits.wgpu-types]]
+who = "Nicolas Silva "
+criteria = "safe-to-deploy"
+delta = "0.18.0@git:3ec547cdcaaa14488327d8f1b5f7736278c4178d -> 0.18.0@git:6e21f7a9291db4395192d6b510d906978ae2d251"
+
 [[audits.wgpu-types]]
 who = "Teodor Tanasoaia "
 criteria = "safe-to-deploy"
diff --git a/third_party/rust/naga/.cargo-checksum.json b/third_party/rust/naga/.cargo-checksum.json
index 7367dafe4a1d1..811e8c8084aa6 100644
--- a/third_party/rust/naga/.cargo-checksum.json
+++ b/third_party/rust/naga/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"772216b2e6d35621ab36c985e8114b54403e2f3fb49bf1f8540c451fd06f8387","Cargo.toml":"839f25c8882c9f05ce0816d4c75c27c4ef89629e85fc8f653875823ff220b129","README.md":"a8209bed7462de253558ee8b197c0ce706d505243b2d23614f74464f1d62e15f","benches/criterion.rs":"98107330452b14b95fbd513f547c9447d21a6fafb2124f4442098262c3f2e84d","src/arena.rs":"debe6414ef822c055b52e808407a1844bf9fb26d682df3df9582d7723de77fdf","src/back/dot/mod.rs":"a40050a73ac00c8fa43dd0b45a84fca6959d28c8c99ab3046b01f33c02f8c8f4","src/back/glsl/features.rs":"05a7d77dd8db1fb65e8961b7c22a3d8ed51863f552fc46d665a9cf979bb979d9","src/back/glsl/keywords.rs":"fbc6dad7e0da457f682ee21f86f5b9ebf0b080d750e34e5600085e14f07ab1c1","src/back/glsl/mod.rs":"82db970ba4e4e50c5a69f1d0bce66b9b82ef0825cfd0f99ede49d0140ff88316","src/back/hlsl/conv.rs":"8aef3788a6441178ed30fe265840ae85b3c5a7c013e4946eacd6c025ec8209f2","src/back/hlsl/help.rs":"e574193fec91f0f44786d54be3d9e8fbb81c3987af8fb6597cb0d3e75f618011","src/back/hlsl/keywords.rs":"eb4af8d697fb7f3991859d66339b5b2eac27c9fe41b73146ac838b207f462c9c","src/back/hlsl/mod.rs":"c677ebbb649a1c5f85f350d8da7129457d50ff52b1c88c1f0fac4a9d11eb020c","src/back/hlsl/storage.rs":"6b5b81ba147ad2b20bfb7c069d3881d46d3da974c033a2669a9cf86c9b9baf5b","src/back/hlsl/writer.rs":"6de37ac96310f8d26ceb0335de73e216d9eabd8d84223d42322c72d910d67c0f","src/back/mod.rs":"b941caed50c086f49d25e76228d247ba6c2da6dbeea18d968c02dc68bb97f409","src/back/msl/keywords.rs":"64a92abc3be4d4840817c1385d30d9f0401184a94367e604bbd25995e96c0154","src/back/msl/mod.rs":"16d905902e30cf900ec924b66ff496adbbbc54af15c59713f358bfac042a625a","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"ea0750e8f43eb2d9e83ccfe8dadd3467a00db925a9ed97a813766a6898c9ae0d","src/back/spv/block.rs":"52afd6ad5a0556b57f3b60f563b02f8cac3879195cb99087ed670c635526a0cd","src/back/spv/helpers.rs":"a4e260130f39c7345decec40dadf1e94419c8f6d236ce7a53b5300aa72952a1b","src/back/spv/image.rs":"d81d770645913fd57725237f6f96cc4d0a6674b7096fab7e620fb58dc0087e64","src/back/spv/index.rs":"26611dd50df5cfd214900e19415f5374dd301d3b7d3bfedbc5ec0f254328287a","src/back/spv/instructions.rs":"d0ced535fdec49323105a7d6ee40a8ed6b4966ac5f0f40b062f0eb11a531b106","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"f15774663582721f463436b18e1cf5aefb590ea9c891d2fbb05a6c7f0a1ef719","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e41","src/back/spv/recyclable.rs":"114db0ea12774d6514f995d07295cb9a42631ab75165fc60980c10e9b5ecb832","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/writer.rs":"847142ca3587bd1c9701d53afa77d50667b3cf7987232b96f5301b2651bbdb28","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"2d1cf8a690e0447e541666a682e1909c25ab4e46386b420bd6c04d5b0b12643a","src/block.rs":"02776b02303c381d6ed39852059e1cbe575da2c799b2421385280a07f828c77d","src/compact/expressions.rs":"7a4c916282a5b484519ed29ab451c7b595d8dea73c83c5c2cf7efc6fbc648fda","src/compact/functions.rs":"174bd9167ecf6353afb8c36d365ba3f9b483233eb4bacf578e50183c7433aa15","src/compact/handle_set_map.rs":"817c5193352d5fd6a61a5c970daba23224e14a65aea15f8f1c8679c99f834ca2","src/compact/mod.rs":"6b171f09fcd9423eecb2865fa854ed5cec725ff30dc572baaee68b3b24cef0eb","src/compact/statements.rs":"4df33ee9589300e769e75c674bdc30578e93704ec3eb2aabc7132121745b55c8","src/compact/types.rs":"18343f2ca2c123eea2531cffc1d54a7798797caccecaa1f9b8c4fd5dd6ca1a05","src/front/glsl/ast.rs":"2ae292c09fed43951e20ec6ce0b7b99efe91726cf90487d6c129137b08618e01","src/front/glsl/builtins.rs":"0c3697ec7ecccb5cd46653adf2749ea8b600c5f66b5292910868aac034cae5a7","src/front/glsl/context.rs":"a2116a163a670ae8b93a48057d3e91c3c59b3df1aed7703e901bb4bf63365dc2","src/front/glsl/error.rs":"08409f8cc13b73c6eda938ff7ebf2b46e9d27c66d718d640ad8d6a33fddbb7a1","src/front/glsl/functions.rs":"a43f4004f1da7d30d8d8e2c77388d5091fc33cb7240d0d916369d0cb724a3f2a","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"c8e435a894d641e6c92fcd7d357d51e1136af1a69410fbaedb88c7262b2269df","src/front/glsl/offset.rs":"3b8f6c298b31eeae14f7a7bbfcfce77cfd3bd534ef9f77c3b9f012b7bf56ccb6","src/front/glsl/parser.rs":"fe5291512db412b33b6c09d5b3dcf7c54ff6ec55b47f0a078dcc11695e78471d","src/front/glsl/parser/declarations.rs":"2229e491a58e3584b5e9f8d58248505f9f164221037aea9520d850d932ce4ad2","src/front/glsl/parser/expressions.rs":"520cfc9402d5fbd48e52ef1d36562c6b74794c09ec33ec1ebb10aa48d129b66f","src/front/glsl/parser/functions.rs":"670ca6bba5a56f3919968658966b05ba824c2d76427b8927d4b111f715091629","src/front/glsl/parser/types.rs":"0971bc98cbde1d70c0ced1717c8726a12a5bfafa9a72b127ed242db5435ec8a8","src/front/glsl/parser_tests.rs":"fc2120fff132af850b52f9ac5d19f0c2fce7997b17ba49881b155d173359cfd3","src/front/glsl/token.rs":"42325adbef5bfc9e6f9e40e7ed3cbfa7ef498c05799fdb7694b123e6450410e1","src/front/glsl/types.rs":"2aa7a25a550a2262501fbef6d7c9fcee69ec5ae60e08ef8c42a9b542a9995fed","src/front/glsl/variables.rs":"fb2a09e386b6e98ca9fb8fb744afac1e8b19d1b67c6ede5c474e3ba860d3d4cb","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"6f77ca3ff925cc16e5ae46132bd6196ca4c9c80a7db123f0d2ec09aae93ba51f","src/front/spv/convert.rs":"9b4ecc53131b6250cde4cae93557eb467127e9e8d0536d29a851538684ae5371","src/front/spv/error.rs":"3129fd1fe346441d61d0f641734df7e919919db15df706788d16402ebf480607","src/front/spv/function.rs":"3a3f0c07862750f79f8ebc273c5df11efc67272566458410f776bd8fa271a0f8","src/front/spv/image.rs":"5d55cfbf6752732a594114cd09a9a207216e1ee85d8f2c9bc4310217a55ea321","src/front/spv/mod.rs":"cdeba305d13d753cdc777111a7990f212aec5706ee16ca9c5cfca3954995e073","src/front/spv/null.rs":"e1446d99d04c76a9c3bbd24dd9b20c4711ce8a918a9b403be6cccbde1175b3b4","src/front/type_gen.rs":"5a4c0b98338e0ee9b525943c60706b8c3066fb372f080faea0d53448468f9f71","src/front/wgsl/error.rs":"1b8202553bd69fb91064ad7bd588b52036799bef943d9fdab34a727f02b8da46","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"f9cdfff82f0b0ccfa8aa9206f13fd349e5856c866f9b3d3c9df47bf783cb2273","src/front/wgsl/lower/mod.rs":"acb9f7ecdf5498a7b726c8cb34003cc77c4d151555e82b654db97157d1b69203","src/front/wgsl/mod.rs":"02b194a0a29ef7281f71b424564e18ada4a8b1a0d8c26ec40b6be195bd4c4904","src/front/wgsl/parse/ast.rs":"c7eaae40179f0889f2b142d3b31968cbfab6d3cfe02e425912c6da8dadac51df","src/front/wgsl/parse/conv.rs":"01b25edbe80b263a3fa51bc980c075630bb31d4af851441323383eb4f3b83360","src/front/wgsl/parse/lexer.rs":"a26d1c6f76b4883a4b79a617f9914eb3f4b05fbae4d4112606a9598d31a46b8e","src/front/wgsl/parse/mod.rs":"3b4895a2baf91c719b95f0afb6441ffac2036c2a9ff817e633882fd257afcc38","src/front/wgsl/parse/number.rs":"64fe5d5f6b1d6a6acf15f4cad3cda8045ad5d82efda179627f848aba9c47e605","src/front/wgsl/tests.rs":"ab26c5c82a21a4e8072fcdc8b3c47c3d6ac1d854047dd88db99e02aa67502e48","src/front/wgsl/to_wgsl.rs":"0aa5970f3870f67d9ac9656104b0862be49518477a4ffa9decf5e15b537ce255","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"7c3b364b60ca29cb8a68ef781de9ecd28b76b74bed18bf18a35d2ebffaa855ab","src/lib.rs":"78eb0f9a1c5f4d59eb40725c1d28153df99f4bda3f7ba0460d0dc1eef70320f4","src/proc/constant_evaluator.rs":"bda1b65361131c0c253ab0be17d6fe0cd7a3f5658026245d59ec5b5c208b2844","src/proc/emitter.rs":"3f0ca32c8bf7e37013e432235cb4d7a9c34a554a56b70420bbaf81a708c54a52","src/proc/index.rs":"f4250f6944c2b631e8140979024e8deb86fa8d5352d8641ba954a388b2c0940e","src/proc/layouter.rs":"58f608bb889136cce365d4ec460074749dcc5fcd63c341179914faae6583e2d4","src/proc/mod.rs":"0457fcad6a61b699fafb720f96b4efeadd50e0f5b8abbc35cd748165b5ff62bb","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35","src/proc/terminator.rs":"13c59bf00f5b26171d971effc421091f5e00dedddd246c2daa44fe65aeda060a","src/proc/typifier.rs":"7235f82a241b9e64299754e8762fecb6b23a65e918d6ec699c3f2d78b776f436","src/span.rs":"c09627f5ae17ed864521f8d42f760d0a2168984e7f63eb676f80c4315512d707","src/valid/analyzer.rs":"2309f8fc1cfd83b6fcd402d4211da3c84c20dc194d615734595c571d90ea4f1f","src/valid/compose.rs":"73c559aae4c594799d1ebb30f39532e51819c9b447f823b380f40e2b7776a705","src/valid/expression.rs":"16dd89ba94e81a52cdcee3f804b7fd0ccc6d076d2568dca3ad251a3b1b945213","src/valid/function.rs":"53f197499c36323c15c56357b4f31965f8fff6ad4a2029defb0e5450713653a1","src/valid/handles.rs":"bf15ab0b93910c7ee27a526a734133585d5738fd95860674f902503a89ca06c4","src/valid/interface.rs":"26ababe7f89f78884fd8a2f41c30f0ba0379c2816e3f88469c89db8b7624422c","src/valid/mod.rs":"e850e5829c258e8b7f909d82c59726471ba07b6331c03172cd7170e5c2f730cf","src/valid/type.rs":"25e47fa4529112f732c07bbaf9212928be49a58f0afcbcb2168e92cd044b08d6"},"package":null}
\ No newline at end of file
+{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"772216b2e6d35621ab36c985e8114b54403e2f3fb49bf1f8540c451fd06f8387","Cargo.toml":"1122c734de573c60738de4d56b11c06376a1ff25412f59caf98a7d7acc39d7b0","README.md":"a8209bed7462de253558ee8b197c0ce706d505243b2d23614f74464f1d62e15f","benches/criterion.rs":"f45e38b26e1323e934d32623572ff5395a53fed06f760eb1e07b22ed07858a38","src/arena.rs":"33ed2ec7b36429b133ed2a7de6fb9735827f69ea8b6c2ce97f64746a24a5bf36","src/back/dot/mod.rs":"a40050a73ac00c8fa43dd0b45a84fca6959d28c8c99ab3046b01f33c02f8c8f4","src/back/glsl/features.rs":"05a7d77dd8db1fb65e8961b7c22a3d8ed51863f552fc46d665a9cf979bb979d9","src/back/glsl/keywords.rs":"fbc6dad7e0da457f682ee21f86f5b9ebf0b080d750e34e5600085e14f07ab1c1","src/back/glsl/mod.rs":"82db970ba4e4e50c5a69f1d0bce66b9b82ef0825cfd0f99ede49d0140ff88316","src/back/hlsl/conv.rs":"8aef3788a6441178ed30fe265840ae85b3c5a7c013e4946eacd6c025ec8209f2","src/back/hlsl/help.rs":"e574193fec91f0f44786d54be3d9e8fbb81c3987af8fb6597cb0d3e75f618011","src/back/hlsl/keywords.rs":"eb4af8d697fb7f3991859d66339b5b2eac27c9fe41b73146ac838b207f462c9c","src/back/hlsl/mod.rs":"c677ebbb649a1c5f85f350d8da7129457d50ff52b1c88c1f0fac4a9d11eb020c","src/back/hlsl/storage.rs":"6b5b81ba147ad2b20bfb7c069d3881d46d3da974c033a2669a9cf86c9b9baf5b","src/back/hlsl/writer.rs":"6de37ac96310f8d26ceb0335de73e216d9eabd8d84223d42322c72d910d67c0f","src/back/mod.rs":"b941caed50c086f49d25e76228d247ba6c2da6dbeea18d968c02dc68bb97f409","src/back/msl/keywords.rs":"998c0d86a26e5cf031c75f35cde28f2b390fe207a2e7d0eed8516ffdb99c1a8e","src/back/msl/mod.rs":"16d905902e30cf900ec924b66ff496adbbbc54af15c59713f358bfac042a625a","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"90b4819ad14240cf92b317c08370d25821f4b9e61018d589dcaf5189102b348d","src/back/spv/block.rs":"52afd6ad5a0556b57f3b60f563b02f8cac3879195cb99087ed670c635526a0cd","src/back/spv/helpers.rs":"a4e260130f39c7345decec40dadf1e94419c8f6d236ce7a53b5300aa72952a1b","src/back/spv/image.rs":"d81d770645913fd57725237f6f96cc4d0a6674b7096fab7e620fb58dc0087e64","src/back/spv/index.rs":"26611dd50df5cfd214900e19415f5374dd301d3b7d3bfedbc5ec0f254328287a","src/back/spv/instructions.rs":"d0ced535fdec49323105a7d6ee40a8ed6b4966ac5f0f40b062f0eb11a531b106","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"f15774663582721f463436b18e1cf5aefb590ea9c891d2fbb05a6c7f0a1ef719","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e41","src/back/spv/recyclable.rs":"114db0ea12774d6514f995d07295cb9a42631ab75165fc60980c10e9b5ecb832","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/writer.rs":"847142ca3587bd1c9701d53afa77d50667b3cf7987232b96f5301b2651bbdb28","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"2d1cf8a690e0447e541666a682e1909c25ab4e46386b420bd6c04d5b0b12643a","src/block.rs":"c69089e5bbb6de6ba24efb15b21d5d434fcabfbc4d48feae948d2a4da135aae7","src/compact/expressions.rs":"7a4c916282a5b484519ed29ab451c7b595d8dea73c83c5c2cf7efc6fbc648fda","src/compact/functions.rs":"174bd9167ecf6353afb8c36d365ba3f9b483233eb4bacf578e50183c7433aa15","src/compact/handle_set_map.rs":"817c5193352d5fd6a61a5c970daba23224e14a65aea15f8f1c8679c99f834ca2","src/compact/mod.rs":"6b171f09fcd9423eecb2865fa854ed5cec725ff30dc572baaee68b3b24cef0eb","src/compact/statements.rs":"4df33ee9589300e769e75c674bdc30578e93704ec3eb2aabc7132121745b55c8","src/compact/types.rs":"18343f2ca2c123eea2531cffc1d54a7798797caccecaa1f9b8c4fd5dd6ca1a05","src/front/glsl/ast.rs":"2ae292c09fed43951e20ec6ce0b7b99efe91726cf90487d6c129137b08618e01","src/front/glsl/builtins.rs":"0c3697ec7ecccb5cd46653adf2749ea8b600c5f66b5292910868aac034cae5a7","src/front/glsl/context.rs":"a2116a163a670ae8b93a48057d3e91c3c59b3df1aed7703e901bb4bf63365dc2","src/front/glsl/error.rs":"08409f8cc13b73c6eda938ff7ebf2b46e9d27c66d718d640ad8d6a33fddbb7a1","src/front/glsl/functions.rs":"a43f4004f1da7d30d8d8e2c77388d5091fc33cb7240d0d916369d0cb724a3f2a","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"c8e435a894d641e6c92fcd7d357d51e1136af1a69410fbaedb88c7262b2269df","src/front/glsl/offset.rs":"3b8f6c298b31eeae14f7a7bbfcfce77cfd3bd534ef9f77c3b9f012b7bf56ccb6","src/front/glsl/parser.rs":"fe5291512db412b33b6c09d5b3dcf7c54ff6ec55b47f0a078dcc11695e78471d","src/front/glsl/parser/declarations.rs":"2229e491a58e3584b5e9f8d58248505f9f164221037aea9520d850d932ce4ad2","src/front/glsl/parser/expressions.rs":"520cfc9402d5fbd48e52ef1d36562c6b74794c09ec33ec1ebb10aa48d129b66f","src/front/glsl/parser/functions.rs":"670ca6bba5a56f3919968658966b05ba824c2d76427b8927d4b111f715091629","src/front/glsl/parser/types.rs":"0971bc98cbde1d70c0ced1717c8726a12a5bfafa9a72b127ed242db5435ec8a8","src/front/glsl/parser_tests.rs":"fc2120fff132af850b52f9ac5d19f0c2fce7997b17ba49881b155d173359cfd3","src/front/glsl/token.rs":"42325adbef5bfc9e6f9e40e7ed3cbfa7ef498c05799fdb7694b123e6450410e1","src/front/glsl/types.rs":"2aa7a25a550a2262501fbef6d7c9fcee69ec5ae60e08ef8c42a9b542a9995fed","src/front/glsl/variables.rs":"fb2a09e386b6e98ca9fb8fb744afac1e8b19d1b67c6ede5c474e3ba860d3d4cb","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"6f77ca3ff925cc16e5ae46132bd6196ca4c9c80a7db123f0d2ec09aae93ba51f","src/front/spv/convert.rs":"9b4ecc53131b6250cde4cae93557eb467127e9e8d0536d29a851538684ae5371","src/front/spv/error.rs":"3129fd1fe346441d61d0f641734df7e919919db15df706788d16402ebf480607","src/front/spv/function.rs":"3a3f0c07862750f79f8ebc273c5df11efc67272566458410f776bd8fa271a0f8","src/front/spv/image.rs":"5d55cfbf6752732a594114cd09a9a207216e1ee85d8f2c9bc4310217a55ea321","src/front/spv/mod.rs":"cdeba305d13d753cdc777111a7990f212aec5706ee16ca9c5cfca3954995e073","src/front/spv/null.rs":"e1446d99d04c76a9c3bbd24dd9b20c4711ce8a918a9b403be6cccbde1175b3b4","src/front/type_gen.rs":"5a4c0b98338e0ee9b525943c60706b8c3066fb372f080faea0d53448468f9f71","src/front/wgsl/error.rs":"1b8202553bd69fb91064ad7bd588b52036799bef943d9fdab34a727f02b8da46","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"f9cdfff82f0b0ccfa8aa9206f13fd349e5856c866f9b3d3c9df47bf783cb2273","src/front/wgsl/lower/mod.rs":"acb9f7ecdf5498a7b726c8cb34003cc77c4d151555e82b654db97157d1b69203","src/front/wgsl/mod.rs":"02b194a0a29ef7281f71b424564e18ada4a8b1a0d8c26ec40b6be195bd4c4904","src/front/wgsl/parse/ast.rs":"c7eaae40179f0889f2b142d3b31968cbfab6d3cfe02e425912c6da8dadac51df","src/front/wgsl/parse/conv.rs":"01b25edbe80b263a3fa51bc980c075630bb31d4af851441323383eb4f3b83360","src/front/wgsl/parse/lexer.rs":"a26d1c6f76b4883a4b79a617f9914eb3f4b05fbae4d4112606a9598d31a46b8e","src/front/wgsl/parse/mod.rs":"3b4895a2baf91c719b95f0afb6441ffac2036c2a9ff817e633882fd257afcc38","src/front/wgsl/parse/number.rs":"64fe5d5f6b1d6a6acf15f4cad3cda8045ad5d82efda179627f848aba9c47e605","src/front/wgsl/tests.rs":"ab26c5c82a21a4e8072fcdc8b3c47c3d6ac1d854047dd88db99e02aa67502e48","src/front/wgsl/to_wgsl.rs":"b1df60b137f5c7f6106d54f70548b19ddef5d55b1da19f4d28a8970723dc0f4a","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"7c3b364b60ca29cb8a68ef781de9ecd28b76b74bed18bf18a35d2ebffaa855ab","src/lib.rs":"78eb0f9a1c5f4d59eb40725c1d28153df99f4bda3f7ba0460d0dc1eef70320f4","src/proc/constant_evaluator.rs":"4a3bfc67e055012036ef850a1f5302c681d41d967156d6fd4c616bd1a3075219","src/proc/emitter.rs":"39ac886c651e2ad33c06a676a7e4826a0e93de0af660c01e8e4b1f7406742f88","src/proc/index.rs":"f4250f6944c2b631e8140979024e8deb86fa8d5352d8641ba954a388b2c0940e","src/proc/layouter.rs":"58f608bb889136cce365d4ec460074749dcc5fcd63c341179914faae6583e2d4","src/proc/mod.rs":"0457fcad6a61b699fafb720f96b4efeadd50e0f5b8abbc35cd748165b5ff62bb","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35","src/proc/terminator.rs":"13c59bf00f5b26171d971effc421091f5e00dedddd246c2daa44fe65aeda060a","src/proc/typifier.rs":"7235f82a241b9e64299754e8762fecb6b23a65e918d6ec699c3f2d78b776f436","src/span.rs":"6560599f20b8bc2de746ee9fd6b05c32bb630af914fce8845d84fdc72f9a636c","src/valid/analyzer.rs":"8472b98f16a4a4a0fa7079197db25696f77ef3e1602a7cddea1930daebd27917","src/valid/compose.rs":"99a910b4d59ad9b62151d39e4f025b9195f832fb19bbab172a0cc907510d79e5","src/valid/expression.rs":"8086c82552985cec11b05171cbcd9edfc1415910b9e29ce6dcbab85430cb603d","src/valid/function.rs":"9782913bb4d3df874ca976aa86b06bffd6c30401b8861440a9430203fef9852a","src/valid/handles.rs":"0878915e67b16d7c41cf8245d9ab3b3f4a604e7d4e87527ea40e03efcbf1f74a","src/valid/interface.rs":"6ddf5f8d5150342d2e8c754a71c92c85d8533fd1d4c6b7a83a05b508e8e8114d","src/valid/mod.rs":"9e2bafa06bea16db2c5a8f825eed4d008c474b87cda2fc7e82ca7a21229c6f20","src/valid/type.rs":"25e47fa4529112f732c07bbaf9212928be49a58f0afcbcb2168e92cd044b08d6"},"package":null}
\ No newline at end of file
diff --git a/third_party/rust/naga/Cargo.toml b/third_party/rust/naga/Cargo.toml
index 212f8dcd55120..0164a545721c9 100644
--- a/third_party/rust/naga/Cargo.toml
+++ b/third_party/rust/naga/Cargo.toml
@@ -55,7 +55,6 @@ optional = true
 
 [dependencies.codespan-reporting]
 version = "0.11.0"
-optional = true
 
 [dependencies.hexf-parse]
 version = "0.2.1"
@@ -84,7 +83,6 @@ optional = true
 
 [dependencies.termcolor]
 version = "1.4.0"
-optional = true
 
 [dependencies.unicode-xid]
 version = "0.2.3"
@@ -137,20 +135,13 @@ serialize = [
     "bitflags/serde",
     "indexmap/serde",
 ]
-span = [
-    "codespan-reporting",
-    "termcolor",
-]
 spv-in = [
     "petgraph",
     "spirv",
 ]
 spv-out = ["spirv"]
-validate = []
 wgsl-in = [
-    "codespan-reporting",
     "hexf-parse",
-    "termcolor",
     "unicode-xid",
 ]
 wgsl-out = []
diff --git a/third_party/rust/naga/benches/criterion.rs b/third_party/rust/naga/benches/criterion.rs
index 697467faa6d24..e57c58a8476ac 100644
--- a/third_party/rust/naga/benches/criterion.rs
+++ b/third_party/rust/naga/benches/criterion.rs
@@ -119,7 +119,6 @@ fn gather_modules() -> Vec<naga::Module> {
 fn validation(c: &mut Criterion) {
     let inputs = gather_modules();
     let mut group = c.benchmark_group("valid");
-    #[cfg(feature = "validate")]
     group.bench_function("safe", |b| {
         let mut validator = naga::valid::Validator::new(
             naga::valid::ValidationFlags::all(),
@@ -131,7 +130,6 @@ fn validation(c: &mut Criterion) {
             }
         });
     });
-    #[cfg(feature = "validate")]
     group.bench_function("unsafe", |b| {
         let mut validator = naga::valid::Validator::new(
             naga::valid::ValidationFlags::empty(),
@@ -146,7 +144,6 @@ fn validation(c: &mut Criterion) {
 }
 
 fn backends(c: &mut Criterion) {
-    #[cfg(feature = "validate")]
     let inputs = {
         let mut validator = naga::valid::Validator::new(
             naga::valid::ValidationFlags::empty(),
@@ -158,8 +155,6 @@ fn backends(c: &mut Criterion) {
             .flat_map(|module| validator.validate(&module).ok().map(|info| (module, info)))
             .collect::<Vec<_>>()
     };
-    #[cfg(not(feature = "validate"))]
-    let inputs = Vec::<(naga::Module, naga::valid::ModuleInfo)>::new();
 
     let mut group = c.benchmark_group("back");
     #[cfg(feature = "wgsl-out")]
diff --git a/third_party/rust/naga/src/arena.rs b/third_party/rust/naga/src/arena.rs
index 54c92e849a760..c37538667fd67 100644
--- a/third_party/rust/naga/src/arena.rs
+++ b/third_party/rust/naga/src/arena.rs
@@ -247,7 +247,6 @@ impl<T> Range<T> {
 pub struct Arena<T> {
     /// Values of this arena.
     data: Vec<T>,
-    #[cfg(feature = "span")]
     #[cfg_attr(feature = "serialize", serde(skip))]
     span_info: Vec<Span>,
 }
@@ -269,7 +268,6 @@ impl<T> Arena<T> {
     pub const fn new() -> Self {
         Arena {
             data: Vec::new(),
-            #[cfg(feature = "span")]
             span_info: Vec::new(),
         }
     }
@@ -310,11 +308,8 @@ impl<T> Arena<T> {
 
     /// Adds a new value to the arena, returning a typed handle.
     pub fn append(&mut self, value: T, span: Span) -> Handle<T> {
-        #[cfg(not(feature = "span"))]
-        let _ = span;
         let index = self.data.len();
         self.data.push(value);
-        #[cfg(feature = "span")]
         self.span_info.push(span);
         Handle::from_usize(index)
     }
@@ -377,18 +372,10 @@ impl<T> Arena<T> {
     }
 
     pub fn get_span(&self, handle: Handle<T>) -> Span {
-        #[cfg(feature = "span")]
-        {
-            *self
-                .span_info
-                .get(handle.index())
-                .unwrap_or(&Span::default())
-        }
-        #[cfg(not(feature = "span"))]
-        {
-            let _ = handle;
-            Span::default()
-        }
+        *self
+            .span_info
+            .get(handle.index())
+            .unwrap_or(&Span::default())
     }
 
     /// Assert that `handle` is valid for this arena.
@@ -438,7 +425,6 @@ impl<T> Arena<T> {
             // Since `predicate` needs mutable access to each element,
             // we can't feasibly call it twice, so we have to compact
             // spans by hand in parallel as part of this iteration.
-            #[cfg(feature = "span")]
             if keep {
                 self.span_info[retained] = self.span_info[index];
                 retained += 1;
@@ -448,7 +434,6 @@ impl<T> Arena<T> {
             keep
         });
 
-        #[cfg(feature = "span")]
         self.span_info.truncate(retained);
     }
 }
@@ -463,16 +448,11 @@ where
     D: serde::Deserializer<'de>,
 {
     let data = Vec::deserialize(deserializer)?;
-    #[cfg(feature = "span")]
     let span_info = std::iter::repeat(Span::default())
         .take(data.len())
         .collect();
 
-    Ok(Self {
-        data,
-        #[cfg(feature = "span")]
-        span_info,
-    })
+    Ok(Self { data, span_info })
 }
 
@@ -561,7 +541,6 @@ pub struct UniqueArena<T> {
     /// promises that its elements "are indexed in a compact range, without
     /// holes in the range 0..set.len()", so we can always use the indices
     /// returned by insertion as indices into this vector.
-    #[cfg(feature = "span")]
     span_info: Vec<Span>,
 }
 
@@ -570,7 +549,6 @@ impl<T> UniqueArena<T> {
     pub fn new() -> Self {
         UniqueArena {
             set: FastIndexSet::default(),
-            #[cfg(feature = "span")]
             span_info: Vec::new(),
         }
     }
@@ -588,7 +566,6 @@ impl<T> UniqueArena<T> {
     /// Clears the arena, keeping all allocations.
     pub fn clear(&mut self) {
         self.set.clear();
-        #[cfg(feature = "span")]
        self.span_info.clear();
     }
 
@@ -596,29 +573,17 @@ impl<T> UniqueArena<T> {
     ///
     /// If a value has been inserted multiple times, the span returned is the
     /// one provided with the first insertion.
-    ///
-    /// If the `span` feature is not enabled, always return `Span::default`.
-    /// This can be detected with [`Span::is_defined`].
     pub fn get_span(&self, handle: Handle<T>) -> Span {
-        #[cfg(feature = "span")]
-        {
-            *self
-                .span_info
-                .get(handle.index())
-                .unwrap_or(&Span::default())
-        }
-        #[cfg(not(feature = "span"))]
-        {
-            let _ = handle;
-            Span::default()
-        }
+        *self
+            .span_info
+            .get(handle.index())
+            .unwrap_or(&Span::default())
     }
 
     #[cfg(feature = "compact")]
     pub(crate) fn drain_all(&mut self) -> UniqueArenaDrain<T> {
         UniqueArenaDrain {
             inner_elts: self.set.drain(..),
-            #[cfg(feature = "span")]
             inner_spans: self.span_info.drain(..),
             index: Index::new(1).unwrap(),
         }
     }
 }
@@ -628,7 +593,6 @@ impl<T> UniqueArena<T> {
 #[cfg(feature = "compact")]
 pub(crate) struct UniqueArenaDrain<'a, T> {
     inner_elts: indexmap::set::Drain<'a, T>,
-    #[cfg(feature = "span")]
     inner_spans: std::vec::Drain<'a, Span>,
     index: Index,
 }
@@ -642,10 +606,7 @@ impl<'a, T> Iterator for UniqueArenaDrain<'a, T> {
             Some(elt) => {
                 let handle = Handle::new(self.index);
                 self.index = self.index.checked_add(1).unwrap();
-                #[cfg(feature = "span")]
                 let span = self.inner_spans.next().unwrap();
-                #[cfg(not(feature = "span"))]
-                let span = Span::default();
                 Some((handle, elt, span))
             }
             None => None,
@@ -672,27 +633,21 @@ impl<T> UniqueArena<T> {
     /// If this arena already contains an element that is `Eq` to `value`,
     /// return a `Handle` to the existing element, and drop `value`.
     ///
-    /// When the `span` feature is enabled, if `value` is inserted into the
-    /// arena, associate `span` with it. An element's span can be retrieved with
-    /// the [`get_span`] method.
+    /// If `value` is inserted into the arena, associate `span` with
+    /// it. An element's span can be retrieved with the [`get_span`]
+    /// method.
     ///
     /// [`Handle`]: Handle
     /// [`get_span`]: UniqueArena::get_span
     pub fn insert(&mut self, value: T, span: Span) -> Handle<T> {
         let (index, added) = self.set.insert_full(value);
 
-        #[cfg(feature = "span")]
-        {
-            if added {
-                debug_assert!(index == self.span_info.len());
-                self.span_info.push(span);
-            }
-
-            debug_assert!(self.set.len() == self.span_info.len());
+        if added {
+            debug_assert!(index == self.span_info.len());
+            self.span_info.push(span);
         }
 
-        #[cfg(not(feature = "span"))]
-        let _ = (span, added);
+        debug_assert!(self.set.len() == self.span_info.len());
 
         Handle::from_usize(index)
     }
@@ -779,14 +734,9 @@ where
     D: serde::Deserializer<'de>,
 {
     let set = FastIndexSet::deserialize(deserializer)?;
-    #[cfg(feature = "span")]
     let span_info = std::iter::repeat(Span::default()).take(set.len()).collect();
 
-    Ok(Self {
-        set,
-        #[cfg(feature = "span")]
-        span_info,
-    })
+    Ok(Self { set, span_info })
 }
 
@@ -800,7 +750,6 @@ where
         let mut arena = Self::default();
         for elem in u.arbitrary_iter()? {
             arena.set.insert(elem?);
-            #[cfg(feature = "span")]
             arena.span_info.push(Span::UNDEFINED);
         }
         Ok(arena)
     }
@@ -810,7 +759,6 @@ where
         let mut arena = Self::default();
         for elem in u.arbitrary_take_rest_iter()? {
             arena.set.insert(elem?);
-            #[cfg(feature = "span")]
            arena.span_info.push(Span::UNDEFINED);
         }
         Ok(arena)
    }
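Editorial aside: with the `span` feature removed, `Arena` tracks a `Span` per
element unconditionally. A minimal sketch of the API surface exercised by the
hunks above (`new`, `append`, `get_span`), assuming the vendored naga 0.14
crate is available as a dependency and that `Arena`/`Span` are the crate-root
re-exports used throughout this diff:

// Assumes `naga` (the vendored 0.14 revision) in Cargo.toml.
use naga::{Arena, Span};

fn main() {
    let mut arena: Arena<u32> = Arena::new();
    // Span bookkeeping is now always on; there is no `span` feature
    // to compile it out.
    let handle = arena.append(42, Span::default());
    // A default span is still retrievable, and detectably "undefined".
    assert!(!arena.get_span(handle).is_defined());
    println!("value = {}", arena[handle]);
}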
diff --git a/third_party/rust/naga/src/back/msl/keywords.rs b/third_party/rust/naga/src/back/msl/keywords.rs
index f34b618db8efe..f0025bf239e8e 100644
--- a/third_party/rust/naga/src/back/msl/keywords.rs
+++ b/third_party/rust/naga/src/back/msl/keywords.rs
@@ -1,114 +1,238 @@
-//TODO: find a complete list
+// MSLS - Metal Shading Language Specification:
+// https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+//
+// C++ - Standard for Programming Language C++ (N4431)
+// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4431.pdf
 pub const RESERVED: &[&str] = &[
-    // control flow
+    // Standard for Programming Language C++ (N4431): 2.5 Alternative tokens
+    "and",
+    "bitor",
+    "or",
+    "xor",
+    "compl",
+    "bitand",
+    "and_eq",
+    "or_eq",
+    "xor_eq",
+    "not",
+    "not_eq",
+    // Standard for Programming Language C++ (N4431): 2.11 Keywords
+    "alignas",
+    "alignof",
+    "asm",
+    "auto",
+    "bool",
     "break",
-    "if",
-    "else",
-    "continue",
-    "goto",
-    "do",
-    "while",
-    "for",
-    "switch",
     "case",
-    // types and values
-    "void",
-    "unsigned",
-    "signed",
-    "bool",
+    "catch",
     "char",
-    "int",
-    "uint",
-    "long",
-    "float",
-    "double",
-    "char8_t",
-    "wchar_t",
-    "true",
-    "false",
-    "nullptr",
-    "union",
+    "char16_t",
+    "char32_t",
     "class",
-    "struct",
-    "enum",
-    // other
-    "main",
-    "using",
+    "const",
+    "constexpr",
+    "const_cast",
+    "continue",
     "decltype",
-    "sizeof",
-    "typeof",
-    "typedef",
+    "default",
+    "delete",
+    "do",
+    "double",
+    "dynamic_cast",
+    "else",
+    "enum",
     "explicit",
     "export",
+    "extern",
+    "false",
+    "float",
+    "for",
     "friend",
+    "goto",
+    "if",
+    "inline",
+    "int",
+    "long",
+    "mutable",
     "namespace",
+    "new",
+    "noexcept",
+    "nullptr",
     "operator",
+    "private",
+    "protected",
     "public",
+    "register",
+    "reinterpret_cast",
+    "return",
+    "short",
+    "signed",
+    "sizeof",
+    "static",
+    "static_assert",
+    "static_cast",
+    "struct",
+    "switch",
     "template",
-    "typename",
-    "typeid",
-    "co_await",
-    "co_return",
-    "co_yield",
-    "module",
-    "import",
-    "ray_data",
-    "vec_step",
-    "visible",
-    "as_type",
     "this",
-    // qualifiers
-    "mutable",
-    "static",
-    "volatile",
-    "restrict",
-    "const",
-    "non-temporal",
-    "dereferenceable",
-    "invariant",
-    // exceptions
+    "thread_local",
     "throw",
+    "true",
     "try",
-    "catch",
-    // operators
-    "const_cast",
-    "dynamic_cast",
-    "reinterpret_cast",
-    "static_cast",
-    "new",
-    "delete",
-    "and",
-    "and_eq",
-    "bitand",
-    "bitor",
-    "compl",
-    "not",
-    "not_eq",
-    "or",
-    "or_eq",
-    "xor",
-    "xor_eq",
-    "compl",
-    // Metal-specific
-    "constant",
+    "typedef",
+    "typeid",
+    "typename",
+    "union",
+    "unsigned",
+    "using",
+    "virtual",
+    "void",
+    "volatile",
+    "wchar_t",
+    "while",
+    // Metal Shading Language Specification: 1.4.4 Restrictions
+    "main",
+    // Metal Shading Language Specification: 2.1 Scalar Data Types
+    "int8_t",
+    "uchar",
+    "uint8_t",
+    "int16_t",
+    "ushort",
+    "uint16_t",
+    "int32_t",
+    "uint",
+    "uint32_t",
+    "int64_t",
+    "uint64_t",
+    "half",
+    "bfloat",
+    "size_t",
+    "ptrdiff_t",
+    // Metal Shading Language Specification: 2.2 Vector Data Types
+    "bool2",
+    "bool3",
+    "bool4",
+    "char2",
+    "char3",
+    "char4",
+    "short2",
+    "short3",
+    "short4",
+    "int2",
+    "int3",
+    "int4",
+    "long2",
+    "long3",
+    "long4",
+    "uchar2",
+    "uchar3",
+    "uchar4",
+    "ushort2",
+    "ushort3",
+    "ushort4",
+    "uint2",
+    "uint3",
+    "uint4",
+    "ulong2",
+    "ulong3",
+    "ulong4",
+    "half2",
+    "half3",
+    "half4",
+    "bfloat2",
+    "bfloat3",
+    "bfloat4",
+    "float2",
+    "float3",
+    "float4",
+    "vec",
+    // Metal Shading Language Specification: 2.2.3 Packed Vector Types
+    "packed_bool2",
+    "packed_bool3",
+    "packed_bool4",
+    "packed_char2",
+    "packed_char3",
+    "packed_char4",
+    "packed_short2",
+    "packed_short3",
+    "packed_short4",
+    "packed_int2",
+    "packed_int3",
+    "packed_int4",
+    "packed_uchar2",
+    "packed_uchar3",
+    "packed_uchar4",
+    "packed_ushort2",
+    "packed_ushort3",
+    "packed_ushort4",
+    "packed_uint2",
+    "packed_uint3",
+    "packed_uint4",
+    "packed_half2",
+    "packed_half3",
+    "packed_half4",
+    "packed_bfloat2",
+    "packed_bfloat3",
+    "packed_bfloat4",
+    "packed_float2",
+    "packed_float3",
+    "packed_float4",
+    "packed_long2",
+    "packed_long3",
+    "packed_long4",
+    "packed_vec",
+    // Metal Shading Language Specification: 2.3 Matrix Data Types
+    "half2x2",
+    "half2x3",
+    "half2x4",
+    "half3x2",
+    "half3x3",
+    "half3x4",
+    "half4x2",
+    "half4x3",
+    "half4x4",
+    "float2x2",
+    "float2x3",
+    "float2x4",
+    "float3x2",
+    "float3x3",
+    "float3x4",
+    "float4x2",
+    "float4x3",
+    "float4x4",
+    "matrix",
+    // Metal Shading Language Specification: 2.6 Atomic Data Types
+    "atomic",
+    "atomic_int",
+    "atomic_uint",
+    "atomic_bool",
+    "atomic_ulong",
+    "atomic_float",
+    // Metal Shading Language Specification: 2.20 Type Conversions and Re-interpreting Data
+    "as_type",
+    // Metal Shading Language Specification: 4 Address Spaces
     "device",
+    "constant",
+    "thread",
     "threadgroup",
     "threadgroup_imageblock",
-    "kernel",
-    "compute",
+    "ray_data",
+    "object_data",
+    // Metal Shading Language Specification: 5.1 Functions
     "vertex",
     "fragment",
-    "read_only",
-    "write_only",
-    "read_write",
-    "auto",
-    // Metal reserved types
+    "kernel",
+    // Metal Shading Language Specification: 6.1 Namespace and Header Files
+    "metal",
+    // C99 / C++ extension:
+    "restrict",
+    // Metal reserved types in :
     "llong",
     "ullong",
     "quad",
     "complex",
     "imaginary",
-    // Metal constants
+    // Constants in :
     "CHAR_BIT",
     "SCHAR_MAX",
     "SCHAR_MIN",
@@ -213,7 +337,6 @@ pub const RESERVED: &[&str] = &[
     "M_SQRT1_2",
     // Naga utilities
     "DefaultConstructible",
-    "clamped_lod_e",
     super::writer::FREXP_FUNCTION,
     super::writer::MODF_FUNCTION,
 ];
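Editorial aside: the expanded RESERVED table exists so the MSL backend can
rename shader identifiers that collide with Metal or C++ keywords. A minimal,
self-contained sketch of that kind of collision check; the short list and the
`escape` helper are illustrative stand-ins, not naga's actual `Namer` logic:

// Tiny excerpt of reserved MSL words, for illustration only.
const RESERVED: &[&str] = &["kernel", "vertex", "fragment", "half2x2"];

// Rename an identifier if it collides with a reserved word, e.g. a WGSL
// function named `vertex` cannot keep its name in generated MSL.
fn escape(name: &str) -> String {
    if RESERVED.contains(&name) {
        format!("{name}_")
    } else {
        name.to_string()
    }
}

fn main() {
    assert_eq!(escape("vertex"), "vertex_");
    assert_eq!(escape("my_func"), "my_func");
}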
"bfloat4", + "float2", + "float3", + "float4", + "vec", + // Metal Shading Language Specification: 2.2.3 Packed Vector Types + "packed_bool2", + "packed_bool3", + "packed_bool4", + "packed_char2", + "packed_char3", + "packed_char4", + "packed_short2", + "packed_short3", + "packed_short4", + "packed_int2", + "packed_int3", + "packed_int4", + "packed_uchar2", + "packed_uchar3", + "packed_uchar4", + "packed_ushort2", + "packed_ushort3", + "packed_ushort4", + "packed_uint2", + "packed_uint3", + "packed_uint4", + "packed_half2", + "packed_half3", + "packed_half4", + "packed_bfloat2", + "packed_bfloat3", + "packed_bfloat4", + "packed_float2", + "packed_float3", + "packed_float4", + "packed_long2", + "packed_long3", + "packed_long4", + "packed_vec", + // Metal Shading Language Specification: 2.3 Matrix Data Types + "half2x2", + "half2x3", + "half2x4", + "half3x2", + "half3x3", + "half3x4", + "half4x2", + "half4x3", + "half4x4", + "float2x2", + "float2x3", + "float2x4", + "float3x2", + "float3x3", + "float3x4", + "float4x2", + "float4x3", + "float4x4", + "matrix", + // Metal Shading Language Specification: 2.6 Atomic Data Types + "atomic", + "atomic_int", + "atomic_uint", + "atomic_bool", + "atomic_ulong", + "atomic_float", + // Metal Shading Language Specification: 2.20 Type Conversions and Re-interpreting Data + "as_type", + // Metal Shading Language Specification: 4 Address Spaces "device", + "constant", + "thread", "threadgroup", "threadgroup_imageblock", - "kernel", - "compute", + "ray_data", + "object_data", + // Metal Shading Language Specification: 5.1 Functions "vertex", "fragment", - "read_only", - "write_only", - "read_write", - "auto", - // Metal reserved types + "kernel", + // Metal Shading Language Specification: 6.1 Namespace and Header Files + "metal", + // C99 / C++ extension: + "restrict", + // Metal reserved types in : "llong", "ullong", "quad", "complex", "imaginary", - // Metal constants + // Constants in : "CHAR_BIT", "SCHAR_MAX", "SCHAR_MIN", @@ -213,7 +337,6 @@ pub const RESERVED: &[&str] = &[ "M_SQRT1_2", // Naga utilities "DefaultConstructible", - "clamped_lod_e", super::writer::FREXP_FUNCTION, super::writer::MODF_FUNCTION, ]; diff --git a/third_party/rust/naga/src/back/msl/writer.rs b/third_party/rust/naga/src/back/msl/writer.rs index 5e36949fe0e73..07077a5c160c2 100644 --- a/third_party/rust/naga/src/back/msl/writer.rs +++ b/third_party/rust/naga/src/back/msl/writer.rs @@ -3118,7 +3118,7 @@ impl Writer { super::keywords::RESERVED, &[], &[], - &[], + &[CLAMPED_LOD_LOAD_PREFIX], &mut self.names, ); self.struct_member_pads.clear(); diff --git a/third_party/rust/naga/src/block.rs b/third_party/rust/naga/src/block.rs index b375132ef726f..0abda9da7cebe 100644 --- a/third_party/rust/naga/src/block.rs +++ b/third_party/rust/naga/src/block.rs @@ -8,7 +8,6 @@ use std::ops::{Deref, DerefMut, RangeBounds}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Block { body: Vec, - #[cfg(feature = "span")] #[cfg_attr(feature = "serialize", serde(skip))] span_info: Vec, } @@ -17,27 +16,20 @@ impl Block { pub const fn new() -> Self { Self { body: Vec::new(), - #[cfg(feature = "span")] span_info: Vec::new(), } } pub fn from_vec(body: Vec) -> Self { - #[cfg(feature = "span")] let span_info = std::iter::repeat(Span::default()) .take(body.len()) .collect(); - Self { - body, - #[cfg(feature = "span")] - span_info, - } + Self { body, span_info } } pub fn with_capacity(capacity: usize) -> Self { Self { body: Vec::with_capacity(capacity), - #[cfg(feature = "span")] 
span_info: Vec::with_capacity(capacity), } } @@ -45,7 +37,6 @@ impl Block { #[allow(unused_variables)] pub fn push(&mut self, end: Statement, span: Span) { self.body.push(end); - #[cfg(feature = "span")] self.span_info.push(span); } @@ -56,43 +47,31 @@ impl Block { } pub fn extend_block(&mut self, other: Self) { - #[cfg(feature = "span")] self.span_info.extend(other.span_info); self.body.extend(other.body); } pub fn append(&mut self, other: &mut Self) { - #[cfg(feature = "span")] self.span_info.append(&mut other.span_info); self.body.append(&mut other.body); } pub fn cull + Clone>(&mut self, range: R) { - #[cfg(feature = "span")] self.span_info.drain(range.clone()); self.body.drain(range); } pub fn splice + Clone>(&mut self, range: R, other: Self) { - #[cfg(feature = "span")] self.span_info.splice(range.clone(), other.span_info); self.body.splice(range, other.body); } pub fn span_iter(&self) -> impl Iterator { - #[cfg(feature = "span")] let span_iter = self.span_info.iter(); - #[cfg(not(feature = "span"))] - let span_iter = std::iter::repeat_with(|| &Span::UNDEFINED); - self.body.iter().zip(span_iter) } pub fn span_iter_mut(&mut self) -> impl Iterator)> { - #[cfg(feature = "span")] let span_iter = self.span_info.iter_mut().map(Some); - #[cfg(not(feature = "span"))] - let span_iter = std::iter::repeat_with(|| None); - self.body.iter_mut().zip(span_iter) } diff --git a/third_party/rust/naga/src/front/wgsl/to_wgsl.rs b/third_party/rust/naga/src/front/wgsl/to_wgsl.rs index 7af3debc4c4e6..bfff363b3a6a7 100644 --- a/third_party/rust/naga/src/front/wgsl/to_wgsl.rs +++ b/third_party/rust/naga/src/front/wgsl/to_wgsl.rs @@ -57,16 +57,14 @@ impl crate::TypeInner { format!("atomic<{}>", scalar.to_wgsl()) } Ti::Pointer { base, .. } => { - let base = &gctx.types[base]; - let name = base.name.as_deref().unwrap_or("unknown"); + let name = base.to_wgsl(gctx); format!("ptr<{name}>") } Ti::ValuePointer { scalar, .. } => { format!("ptr<{}>", scalar.to_wgsl()) } Ti::Array { base, size, .. } => { - let member_type = &gctx.types[base]; - let base = member_type.name.as_deref().unwrap_or("unknown"); + let base = base.to_wgsl(gctx); match size { crate::ArraySize::Constant(size) => format!("array<{base}, {size}>"), crate::ArraySize::Dynamic => format!("array<{base}>"), diff --git a/third_party/rust/naga/src/proc/constant_evaluator.rs b/third_party/rust/naga/src/proc/constant_evaluator.rs index b9247b3c857ba..6adc97ac3e107 100644 --- a/third_party/rust/naga/src/proc/constant_evaluator.rs +++ b/third_party/rust/naga/src/proc/constant_evaluator.rs @@ -863,6 +863,22 @@ impl<'a> ConstantEvaluator<'a> { } } + /// Lower [`ZeroValue`] expressions to [`Literal`] and [`Compose`] expressions. + /// + /// [`ZeroValue`]: Expression::ZeroValue + /// [`Literal`]: Expression::Literal + /// [`Compose`]: Expression::Compose + fn eval_zero_value( + &mut self, + expr: Handle, + span: Span, + ) -> Result, ConstantEvaluatorError> { + match self.expressions[expr] { + Expression::ZeroValue(ty) => self.eval_zero_value_impl(ty, span), + _ => Ok(expr), + } + } + /// Lower [`ZeroValue`] expressions to [`Literal`] and [`Compose`] expressions. 
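Editorial aside: the `Expression::Splat` arm added to `cast` above (#4719)
keeps a splat as a splat, casting the single scalar operand instead of
expanding the vector into a `Compose`. A simplified, self-contained model of
that rule, using a mock expression enum rather than naga's real `Expression`:

// Mock expression type: `Splat` broadcasts one scalar to all vector lanes.
#[derive(Debug, PartialEq)]
enum Expr {
    LiteralF64(f64),
    LiteralI64(i64),
    Splat { size: u8, value: Box<Expr> },
}

// Cast to i64, preserving `Splat` by recursing into its scalar operand.
fn cast_to_i64(expr: Expr) -> Expr {
    match expr {
        Expr::LiteralF64(v) => Expr::LiteralI64(v as i64),
        Expr::LiteralI64(v) => Expr::LiteralI64(v),
        Expr::Splat { size, value } => Expr::Splat {
            size,
            value: Box::new(cast_to_i64(*value)),
        },
    }
}

fn main() {
    let splat = Expr::Splat { size: 4, value: Box::new(Expr::LiteralF64(1.5)) };
    // The result stays a splat; only the scalar operand was cast.
    assert_eq!(
        cast_to_i64(splat),
        Expr::Splat { size: 4, value: Box::new(Expr::LiteralI64(1)) }
    );
}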
diff --git a/third_party/rust/naga/src/proc/emitter.rs b/third_party/rust/naga/src/proc/emitter.rs
index 281a55e2ad62a..0df804fff2eba 100644
--- a/third_party/rust/naga/src/proc/emitter.rs
+++ b/third_party/rust/naga/src/proc/emitter.rs
@@ -28,7 +28,6 @@ impl Emitter {
         #[allow(unused_mut)]
         let mut span = crate::span::Span::default();
         let range = arena.range_from(start_len);
-        #[cfg(feature = "span")]
         for handle in range.clone() {
             span.subsume(arena.get_span(handle))
         }
diff --git a/third_party/rust/naga/src/span.rs b/third_party/rust/naga/src/span.rs
index 5c617515ff8d2..53246b25d6ef3 100644
--- a/third_party/rust/naga/src/span.rs
+++ b/third_party/rust/naga/src/span.rs
@@ -128,7 +128,6 @@ pub type SpanContext = (Span, String);
 #[derive(Debug, Clone)]
 pub struct WithSpan<E> {
     inner: E,
-    #[cfg(feature = "span")]
     spans: Vec<SpanContext>,
 }
 
@@ -165,7 +164,6 @@ impl<E> WithSpan<E> {
     pub const fn new(inner: E) -> Self {
         Self {
             inner,
-            #[cfg(feature = "span")]
             spans: Vec::new(),
         }
     }
@@ -182,22 +180,14 @@ impl<E> WithSpan<E> {
 
     /// Iterator over stored [`SpanContext`]s.
     pub fn spans(&self) -> impl ExactSizeIterator<Item = &SpanContext> {
-        #[cfg(feature = "span")]
-        return self.spans.iter();
-        #[cfg(not(feature = "span"))]
-        return std::iter::empty();
+        self.spans.iter()
     }
 
     /// Add a new span with description.
-    #[cfg_attr(
-        not(feature = "span"),
-        allow(unused_variables, unused_mut, clippy::missing_const_for_fn)
-    )]
     pub fn with_span<S>(mut self, span: Span, description: S) -> Self
     where
         S: ToString,
     {
-        #[cfg(feature = "span")]
         if span.is_defined() {
             self.spans.push((span, description.to_string()));
         }
@@ -223,7 +213,6 @@ impl<E> WithSpan<E> {
     {
         WithSpan {
             inner: self.inner.into(),
-            #[cfg(feature = "span")]
             spans: self.spans,
         }
     }
@@ -234,14 +223,11 @@ impl<E> WithSpan<E> {
     where
         F: FnOnce(E) -> WithSpan<E2>,
     {
-        #[cfg_attr(not(feature = "span"), allow(unused_mut))]
         let mut res = func(self.inner);
-        #[cfg(feature = "span")]
         res.spans.extend(self.spans);
         res
     }
 
-    #[cfg(feature = "span")]
     /// Return a [`SourceLocation`] for our first span, if we have one.
     pub fn location(&self, source: &str) -> Option<SourceLocation> {
         if self.spans.is_empty() {
@@ -251,14 +237,6 @@ impl<E> WithSpan<E> {
         Some(self.spans[0].0.location(source))
     }
 
-    #[cfg(not(feature = "span"))]
-    #[allow(clippy::missing_const_for_fn)]
-    /// Return a [`SourceLocation`] for our first span, if we have one.
-    pub fn location(&self, _source: &str) -> Option<SourceLocation> {
-        None
-    }
-
-    #[cfg(feature = "span")]
     fn diagnostic(&self) -> codespan_reporting::diagnostic::Diagnostic<()>
     where
         E: Error,
@@ -286,7 +264,6 @@ impl<E> WithSpan<E> {
     }
 
     /// Emits a summary of the error to standard error stream.
-    #[cfg(feature = "span")]
     pub fn emit_to_stderr(&self, source: &str)
     where
         E: Error,
@@ -295,7 +272,6 @@ impl<E> WithSpan<E> {
     }
 
     /// Emits a summary of the error to standard error stream.
-    #[cfg(feature = "span")]
     pub fn emit_to_stderr_with_path(&self, source: &str, path: &str)
     where
         E: Error,
@@ -311,7 +287,6 @@ impl<E> WithSpan<E> {
     }
 
     /// Emits a summary of the error to a string.
-    #[cfg(feature = "span")]
     pub fn emit_to_string(&self, source: &str) -> String
     where
         E: Error,
@@ -320,7 +295,6 @@ impl<E> WithSpan<E> {
     }
 
     /// Emits a summary of the error to a string.
-    #[cfg(feature = "span")]
     pub fn emit_to_string_with_path(&self, source: &str, path: &str) -> String
     where
         E: Error,
diff --git a/third_party/rust/naga/src/valid/analyzer.rs b/third_party/rust/naga/src/valid/analyzer.rs
index b96e482934d93..df6fc5e9b0260 100644
--- a/third_party/rust/naga/src/valid/analyzer.rs
+++ b/third_party/rust/naga/src/valid/analyzer.rs
@@ -778,7 +778,6 @@ impl FunctionInfo {
         let mut requirements = UniformityRequirements::empty();
         for expr in range.clone() {
             let req = self.expressions[expr.index()].uniformity.requirements;
-            #[cfg(feature = "validate")]
             if self
                 .flags
                 .contains(super::ValidationFlags::CONTROL_FLOW_UNIFORMITY)
@@ -823,7 +822,7 @@ impl FunctionInfo {
         // The uniformity analysis Naga uses now is less accurate than the one in the WGSL standard,
         // causing Naga to reject correct uses of `workgroupUniformLoad` in some interesting programs.
-        /* #[cfg(feature = "validate")]
+        /*
         if self
             .flags
             .contains(super::ValidationFlags::CONTROL_FLOW_UNIFORMITY)
@@ -1060,7 +1059,6 @@ impl ModuleInfo {
 }
 
 #[test]
-#[cfg(feature = "validate")]
 fn uniform_control_flow() {
     use crate::{Expression as E, Statement as S};
 
diff --git a/third_party/rust/naga/src/valid/compose.rs b/third_party/rust/naga/src/valid/compose.rs
index b392de57b0106..a537e07af5683 100644
--- a/third_party/rust/naga/src/valid/compose.rs
+++ b/third_party/rust/naga/src/valid/compose.rs
@@ -1,4 +1,3 @@
-#[cfg(feature = "validate")]
 use crate::proc::TypeResolution;
 
 use crate::arena::Handle;
@@ -14,7 +13,6 @@ pub enum ComposeError {
     ComponentType { index: u32 },
 }
 
-#[cfg(feature = "validate")]
 pub fn validate_compose(
     self_ty_handle: Handle<crate::Type>,
     gctx: crate::proc::GlobalCtx,
diff --git a/third_party/rust/naga/src/valid/expression.rs b/third_party/rust/naga/src/valid/expression.rs
index dc70c2e610260..840ba90d01a1a 100644
--- a/third_party/rust/naga/src/valid/expression.rs
+++ b/third_party/rust/naga/src/valid/expression.rs
@@ -1,9 +1,7 @@
-#[cfg(feature = "validate")]
 use super::{
     compose::validate_compose, validate_atomic_compare_exchange_struct, FunctionInfo, ModuleInfo,
     ShaderStages, TypeFlags,
 };
-#[cfg(feature = "validate")]
 use crate::arena::UniqueArena;
 
 use crate::{
@@ -155,14 +153,12 @@ pub enum LiteralError {
     Width(#[from] super::r#type::WidthError),
 }
 
-#[cfg(feature = "validate")]
 struct ExpressionTypeResolver<'a> {
     root: Handle<crate::Expression>,
     types: &'a UniqueArena<crate::Type>,
     info: &'a FunctionInfo,
 }
 
-#[cfg(feature = "validate")]
 impl<'a> std::ops::Index<Handle<crate::Expression>> for ExpressionTypeResolver<'a> {
     type Output = crate::TypeInner;
 
@@ -180,7 +176,6 @@ impl<'a> std::ops::Index<Handle<crate::Expression>> for ExpressionTypeResolver<'a> {
     }
 }
 
-#[cfg(feature = "validate")]
 impl super::Validator {
     pub(super) fn validate_const_expression(
         &self,
diff --git a/third_party/rust/naga/src/valid/function.rs b/third_party/rust/naga/src/valid/function.rs
index 126493778155e..f5da7d0764f59 100644
--- a/third_party/rust/naga/src/valid/function.rs
+++ b/third_party/rust/naga/src/valid/function.rs
@@ -1,8 +1,6 @@
 use crate::arena::Handle;
-#[cfg(feature = "validate")]
 use crate::arena::{Arena, UniqueArena};
 
-#[cfg(feature = "validate")]
 use super::validate_atomic_compare_exchange_struct;
 
 use super::{
@@ -10,10 +8,8 @@ use super::{
     ExpressionError, FunctionInfo, ModuleInfo,
 };
 use crate::span::WithSpan;
-#[cfg(feature = "validate")]
 use crate::span::{AddSpan as _, MapErrWithSpan as _};
 
-#[cfg(feature = "validate")]
 use bit_set::BitSet;
 
 #[derive(Clone, Debug, thiserror::Error)]
@@ -174,13 +170,11 @@ bitflags::bitflags! {
     }
 }
 
-#[cfg(feature = "validate")]
 struct BlockInfo {
     stages: super::ShaderStages,
     finished: bool,
 }
 
-#[cfg(feature = "validate")]
 struct BlockContext<'a> {
     abilities: ControlFlowAbility,
     info: &'a FunctionInfo,
@@ -194,7 +188,6 @@ struct BlockContext<'a> {
     return_type: Option<Handle<crate::Type>>,
 }
 
-#[cfg(feature = "validate")]
 impl<'a> BlockContext<'a> {
     fn new(
         fun: &'a crate::Function,
@@ -263,7 +256,6 @@ impl<'a> BlockContext<'a> {
 }
 
 impl super::Validator {
-    #[cfg(feature = "validate")]
     fn validate_call(
         &mut self,
         function: Handle<crate::Function>,
@@ -320,7 +312,6 @@ impl super::Validator {
         Ok(callee_info.available_stages)
     }
 
-    #[cfg(feature = "validate")]
     fn emit_expression(
         &mut self,
         handle: Handle<crate::Expression>,
@@ -335,7 +326,6 @@ impl super::Validator {
         }
     }
 
-    #[cfg(feature = "validate")]
     fn validate_atomic(
         &mut self,
         pointer: Handle<crate::Expression>,
@@ -410,7 +400,6 @@ impl super::Validator {
         Ok(())
     }
 
-    #[cfg(feature = "validate")]
     fn validate_block_impl(
         &mut self,
         statements: &crate::Block,
@@ -920,7 +909,6 @@ impl super::Validator {
         Ok(BlockInfo { stages, finished })
     }
 
-    #[cfg(feature = "validate")]
     fn validate_block(
         &mut self,
         statements: &crate::Block,
@@ -934,7 +922,6 @@ impl super::Validator {
         Ok(info)
     }
 
-    #[cfg(feature = "validate")]
     fn validate_local_var(
         &self,
         var: &crate::LocalVariable,
@@ -971,16 +958,13 @@ impl super::Validator {
         fun: &crate::Function,
         module: &crate::Module,
         mod_info: &ModuleInfo,
-        #[cfg_attr(not(feature = "validate"), allow(unused))] entry_point: bool,
+        entry_point: bool,
     ) -> Result<FunctionInfo, WithSpan<FunctionError>> {
-        #[cfg_attr(not(feature = "validate"), allow(unused_mut))]
         let mut info = mod_info.process_function(fun, module, self.flags, self.capabilities)?;
 
-        #[cfg(feature = "validate")]
         let expression_constness =
             crate::proc::ExpressionConstnessTracker::from_arena(&fun.expressions);
 
-        #[cfg(feature = "validate")]
         for (var_handle, var) in fun.local_variables.iter() {
             self.validate_local_var(var, module.to_ctx(), &info, &expression_constness)
                 .map_err(|source| {
@@ -994,7 +978,6 @@ impl super::Validator {
                 })?;
         }
 
-        #[cfg(feature = "validate")]
         for (index, argument) in fun.arguments.iter().enumerate() {
             match module.types[argument.ty].inner.pointer_space() {
                 Some(crate::AddressSpace::Private | crate::AddressSpace::Function) | None => {}
@@ -1027,7 +1010,6 @@ impl super::Validator {
             }
         }
 
-        #[cfg(feature = "validate")]
         if let Some(ref result) = fun.result {
             if !self.types[result.ty.index()]
                 .flags
@@ -1049,7 +1031,6 @@ impl super::Validator {
             if expr.needs_pre_emit() {
                 self.valid_expression_set.insert(handle.index());
             }
-            #[cfg(feature = "validate")]
             if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
                 match self.validate_expression(handle, expr, fun, module, &info, mod_info) {
                     Ok(stages) => info.available_stages &= stages,
@@ -1061,7 +1042,6 @@ impl super::Validator {
             }
         }
 
-        #[cfg(feature = "validate")]
         if self.flags.contains(super::ValidationFlags::BLOCKS) {
             let stages = self
                 .validate_block(
diff --git a/third_party/rust/naga/src/valid/handles.rs b/third_party/rust/naga/src/valid/handles.rs
index ec6dd240c0500..e482f293bb925 100644
--- a/third_party/rust/naga/src/valid/handles.rs
+++ b/third_party/rust/naga/src/valid/handles.rs
@@ -5,16 +5,12 @@ use crate::{
     Handle,
 };
 
-#[cfg(feature = "validate")]
 use crate::{Arena, UniqueArena};
 
-#[cfg(feature = "validate")]
 use super::ValidationError;
 
-#[cfg(feature = "validate")]
 use std::{convert::TryInto, hash::Hash, num::NonZeroU32};
 
-#[cfg(feature = "validate")]
 impl super::Validator {
     /// Validates that all handles within `module` are:
     ///
@@ -547,21 +543,18 @@ impl super::Validator {
     }
 }
 
-#[cfg(feature = "validate")]
 impl From<BadHandle> for ValidationError {
     fn from(source: BadHandle) -> Self {
         Self::InvalidHandle(source.into())
     }
 }
 
-#[cfg(feature = "validate")]
 impl From<FwdDepError> for ValidationError {
     fn from(source: FwdDepError) -> Self {
         Self::InvalidHandle(source.into())
     }
 }
 
-#[cfg(feature = "validate")]
 impl From<BadRangeError> for ValidationError {
     fn from(source: BadRangeError) -> Self {
         Self::InvalidHandle(source.into())
@@ -592,7 +585,6 @@ pub struct FwdDepError {
     depends_on_kind: &'static str,
 }
 
-#[cfg(feature = "validate")]
 impl<T> Handle<T> {
     /// Check that `self` is valid within `arena` using [`Arena::check_contains_handle`].
     pub(self) fn check_valid_for(self, arena: &Arena<T>) -> Result<(), InvalidHandleError> {
@@ -656,7 +648,6 @@ impl<T> Handle<T> {
     }
 }
 
-#[cfg(feature = "validate")]
 impl<T> crate::arena::Range<T> {
     pub(self) fn check_valid_for(&self, arena: &Arena<T>) -> Result<(), BadRangeError> {
         arena.check_contains_range(self)
@@ -664,7 +655,6 @@
     }
 }
 
 #[test]
-#[cfg(feature = "validate")]
 fn constant_deps() {
     use crate::{Constant, Expression, Literal, Span, Type, TypeInner};
 
diff --git a/third_party/rust/naga/src/valid/interface.rs b/third_party/rust/naga/src/valid/interface.rs
index 2e50f67160685..57863048e54cc 100644
--- a/third_party/rust/naga/src/valid/interface.rs
+++ b/third_party/rust/naga/src/valid/interface.rs
@@ -7,7 +7,6 @@ use crate::arena::{Handle, UniqueArena};
 use crate::span::{AddSpan as _, MapErrWithSpan as _, SpanProvider as _, WithSpan};
 use bit_set::BitSet;
 
-#[cfg(feature = "validate")]
 const MAX_WORKGROUP_SIZE: u32 = 0x4000;
 
 #[derive(Clone, Debug, thiserror::Error)]
@@ -110,7 +109,6 @@ pub enum EntryPointError {
     InvalidLocationsWhileDualSourceBlending { location_mask: BitSet },
 }
 
-#[cfg(feature = "validate")]
 fn storage_usage(access: crate::StorageAccess) -> GlobalUse {
     let mut storage_usage = GlobalUse::QUERY;
     if access.contains(crate::StorageAccess::LOAD) {
@@ -131,8 +129,6 @@ struct VaryingContext<'a> {
     location_mask: &'a mut BitSet,
     built_ins: &'a mut crate::FastHashSet<crate::BuiltIn>,
     capabilities: Capabilities,
-
-    #[cfg(feature = "validate")]
     flags: super::ValidationFlags,
 }
 
@@ -307,7 +303,6 @@ impl VaryingContext<'_> {
                     self.second_blend_source = true;
                 } else if !self.location_mask.insert(location as usize) {
-                    #[cfg(feature = "validate")]
                     if self.flags.contains(super::ValidationFlags::BINDINGS) {
                         return Err(VaryingError::BindingCollision { location });
                     }
@@ -369,15 +364,12 @@ impl VaryingContext<'_> {
                     let span_context = self.types.get_span_context(ty);
                     match member.binding {
                         None => {
-                            #[cfg(feature = "validate")]
                             if self.flags.contains(super::ValidationFlags::BINDINGS) {
                                 return Err(VaryingError::MemberMissingBinding(
                                     index as u32,
                                 )
                                 .with_span_context(span_context));
                             }
-                            #[cfg(not(feature = "validate"))]
-                            let _ = index;
                         }
                         Some(ref binding) => self
                             .validate_impl(member.ty, binding)
@@ -385,9 +377,7 @@ impl VaryingContext<'_> {
                     }
                 }
             }
-            _ =>
-            {
-                #[cfg(feature = "validate")]
+            _ => {
                 if self.flags.contains(super::ValidationFlags::BINDINGS) {
                     return Err(VaryingError::MissingBinding.with_span());
                 }
@@ -400,7 +390,6 @@ impl VaryingContext<'_> {
 }
 
 impl super::Validator {
-    #[cfg(feature = "validate")]
     pub(super) fn validate_global_var(
         &self,
         var:
&crate::GlobalVariable, @@ -550,7 +539,6 @@ impl super::Validator { module: &crate::Module, mod_info: &ModuleInfo, ) -> Result> { - #[cfg(feature = "validate")] if ep.early_depth_test.is_some() { let required = Capabilities::EARLY_DEPTH_TEST; if !self.capabilities.contains(required) { @@ -565,7 +553,6 @@ impl super::Validator { } } - #[cfg(feature = "validate")] if ep.stage == crate::ShaderStage::Compute { if ep .workgroup_size @@ -578,12 +565,10 @@ impl super::Validator { return Err(EntryPointError::UnexpectedWorkgroupSize.with_span()); } - #[cfg_attr(not(feature = "validate"), allow(unused_mut))] let mut info = self .validate_function(&ep.function, module, mod_info, true) .map_err(WithSpan::into_other)?; - #[cfg(feature = "validate")] { use super::ShaderStages; @@ -611,8 +596,6 @@ impl super::Validator { location_mask: &mut self.location_mask, built_ins: &mut argument_built_ins, capabilities: self.capabilities, - - #[cfg(feature = "validate")] flags: self.flags, }; ctx.validate(fa.ty, fa.binding.as_ref()) @@ -631,13 +614,10 @@ impl super::Validator { location_mask: &mut self.location_mask, built_ins: &mut result_built_ins, capabilities: self.capabilities, - - #[cfg(feature = "validate")] flags: self.flags, }; ctx.validate(fr.ty, fr.binding.as_ref()) .map_err_inner(|e| EntryPointError::Result(e).with_span())?; - #[cfg(feature = "validate")] if ctx.second_blend_source { // Only the first location may be used whhen dual source blending if ctx.location_mask.len() == 1 && ctx.location_mask.contains(0) { @@ -650,18 +630,15 @@ impl super::Validator { } } - #[cfg(feature = "validate")] if ep.stage == crate::ShaderStage::Vertex && !result_built_ins.contains(&crate::BuiltIn::Position { invariant: false }) { return Err(EntryPointError::MissingVertexOutputPosition.with_span()); } } else if ep.stage == crate::ShaderStage::Vertex { - #[cfg(feature = "validate")] return Err(EntryPointError::MissingVertexOutputPosition.with_span()); } - #[cfg(feature = "validate")] { let used_push_constants = module .global_variables @@ -679,7 +656,6 @@ impl super::Validator { } self.ep_resource_bindings.clear(); - #[cfg(feature = "validate")] for (var_handle, var) in module.global_variables.iter() { let usage = info[var_handle]; if usage.is_empty() { diff --git a/third_party/rust/naga/src/valid/mod.rs b/third_party/rust/naga/src/valid/mod.rs index 011e90fbc23d2..70a4d39d2a924 100644 --- a/third_party/rust/naga/src/valid/mod.rs +++ b/third_party/rust/naga/src/valid/mod.rs @@ -45,31 +45,22 @@ bitflags::bitflags! { /// should never panic. /// /// The default value for `ValidationFlags` is - /// `ValidationFlags::all()`. If Naga's `"validate"` feature is - /// enabled, this requests full validation; otherwise, this - /// requests no validation. (The `"validate"` feature is disabled - /// by default.) + /// `ValidationFlags::all()`. #[cfg_attr(feature = "serialize", derive(serde::Serialize))] #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct ValidationFlags: u8 { /// Expressions. - #[cfg(feature = "validate")] const EXPRESSIONS = 0x1; /// Statements and blocks of them. - #[cfg(feature = "validate")] const BLOCKS = 0x2; /// Uniformity of control flow for operations that require it. - #[cfg(feature = "validate")] const CONTROL_FLOW_UNIFORMITY = 0x4; /// Host-shareable structure layouts. - #[cfg(feature = "validate")] const STRUCT_LAYOUTS = 0x8; /// Constants. 
- #[cfg(feature = "validate")] const CONSTANTS = 0x10; /// Group, binding, and location attributes. - #[cfg(feature = "validate")] const BINDINGS = 0x20; } } @@ -237,7 +228,6 @@ pub enum ValidationError { } impl crate::TypeInner { - #[cfg(feature = "validate")] const fn is_sized(&self) -> bool { match *self { Self::Scalar { .. } @@ -261,7 +251,6 @@ impl crate::TypeInner { } /// Return the `ImageDimension` for which `self` is an appropriate coordinate. - #[cfg(feature = "validate")] const fn image_storage_coordinates(&self) -> Option { match *self { Self::Scalar(crate::Scalar { @@ -316,7 +305,6 @@ impl Validator { self.valid_expression_set.clear(); } - #[cfg(feature = "validate")] fn validate_constant( &self, handle: Handle, @@ -347,7 +335,6 @@ impl Validator { self.reset(); self.reset_types(module.types.len()); - #[cfg(feature = "validate")] Self::validate_module_handles(module).map_err(|e| e.with_span())?; self.layouter.update(module.to_ctx()).map_err(|e| { @@ -397,7 +384,6 @@ impl Validator { } } - #[cfg(feature = "validate")] if self.flags.contains(ValidationFlags::CONSTANTS) { for (handle, _) in module.const_expressions.iter() { self.validate_const_expression(handle, module.to_ctx(), &mod_info) @@ -420,7 +406,6 @@ impl Validator { } } - #[cfg(feature = "validate")] for (var_handle, var) in module.global_variables.iter() { self.validate_global_var(var, module.to_ctx(), &mod_info) .map_err(|source| { @@ -479,7 +464,6 @@ impl Validator { } } -#[cfg(feature = "validate")] fn validate_atomic_compare_exchange_struct( types: &crate::UniqueArena, members: &[crate::StructMember], diff --git a/third_party/rust/wgpu-core/.cargo-checksum.json b/third_party/rust/wgpu-core/.cargo-checksum.json index fc85b87aef617..c6999a20af06d 100644 --- a/third_party/rust/wgpu-core/.cargo-checksum.json +++ b/third_party/rust/wgpu-core/.cargo-checksum.json @@ -1 +1 @@ 
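Taken together, the naga hunks above delete the `span` and `validate` Cargo features outright: span tracking and validation are always compiled in, and the runtime `ValidationFlags` value alone decides how much checking runs. A minimal sketch of what driving the validator now looks like, assuming naga's `wgsl-in` front end is enabled (the trivial shader string is made up; `parse_str`, `emit_to_string`, and the `Validator` constructor are the naga 0.14 API):

```rust
use naga::valid::{Capabilities, ValidationFlags, Validator};

fn validate_wgsl(source: &str) -> Result<naga::valid::ModuleInfo, String> {
    let module =
        naga::front::wgsl::parse_str(source).map_err(|e| e.emit_to_string(source))?;
    // ValidationFlags::all() is now unconditionally the default; pass a
    // smaller set to skip, say, CONTROL_FLOW_UNIFORMITY at runtime instead
    // of at compile time via the removed "validate" feature.
    let mut validator = Validator::new(ValidationFlags::all(), Capabilities::empty());
    validator
        .validate(&module)
        .map_err(|e| e.emit_to_string(source))
}

fn main() {
    let info = validate_wgsl("fn main() {}").expect("trivial module validates");
    let _ = info;
}
```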
-{"files":{"Cargo.toml":"a01dee48cee8d362ac28b8c920b914d11be2362ca6d7d6036b6d786eca348be2","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/binding_model.rs":"f6d9b170a9328751b3109c81d6f1b555a6db1410f1b7e67c59cc596a1fd1d293","src/command/bind.rs":"67b8431a32f403e358b9c1593e6e0d64519f5f0b699059e6ea5374800d5c23d9","src/command/bundle.rs":"d5456be31f973fcae55ee01781da963f1c950aa5db7eae4d3c6166d4f1a132ca","src/command/clear.rs":"9c4c1734705a79b64a31540ddf7ec4e9f10b9d923b6d21783d98314a7b10bb25","src/command/compute.rs":"8caa0f5fa1956a3747f0b845fdeec2572b9462a7e98fdb5cf7b562c472bd6c4c","src/command/draw.rs":"3687cbde422a29f28c1c3d17e132d912b3b4b2bcc98efca68d1ee0d563a5bf56","src/command/memory_init.rs":"b50d3d20dbf659052f19da2e79469ba6435e06370f19d6ef45e1b1128d9900b7","src/command/mod.rs":"4eac2ac0283c0cfdcc3512af5439fe38e7004e5f406bf52a66d3017ad1ae7632","src/command/query.rs":"d39e1b8cb6a054fd31333a916da5d79a6671a724212c90c490c13e55043a1685","src/command/render.rs":"c445f52d0b82d80033071025446ca2488ef702d4fd452fe4c67b154392630bab","src/command/transfer.rs":"1fc4d8a87eef3241134f0e6d3bb617511629afe10fddb16440f688e17bee7719","src/conv.rs":"da95b36b7680ae74ebf810ad8f1decf01bd3eeaff44b3c5af1d4b3c3f0e2059a","src/device/global.rs":"2ccbc26fddb9291fecd0fe3c5e85c806cf77319fe2818a8684a81bed3ea7e649","src/device/life.rs":"6a7cbf7d47155fe1171387221141eefab135868196a03b0bba86ab168d9dee58","src/device/mod.rs":"a7627267001bbadb7309f4f0721ecec404dcba06bd6aab4dd6fa17410fc7402b","src/device/queue.rs":"56cbce0ed748c78d2360e3e042ffade1d941231acbb873d7aab4ac65b938ad06","src/device/resource.rs":"c4173ab900d5332984bb20e5e2733c4c1aa799e06bfe1e975fe55982ec3cdf29","src/device/trace.rs":"21408dfd2c99e3ce36a77d08ba86cf52f32bb376ed82690bbbf74937bfd42cbe","src/error.rs":"ca37282283985e2b7d184b2ab7ca6f53f726432d920f8d8477bfff6fab9b34e2","src/global.rs":"cf551de97c3eb5acd0c2710da09ebd92cc863ad0bb0f53c0fd4911bf8cd3ad97","src/hal_api.rs":"92a2f0cb80f192693530ed61048919bbad446742c2370bf0944c44b1c5df8362","src/hub.rs":"b4207d0a450da9e1d9edb0abc3c99e495132793ebe26af78ea07397d2e5c0b85","src/id.rs":"ef7b3a77110277f4eb2fa1a2ae3d89318023b74d5671181684d2845ef7b7d87a","src/identity.rs":"3ce6a3b57c7c4fc0808d13cd342d928c214f32368e45c79d8e2bbf8df887f97f","src/init_tracker/buffer.rs":"a0ebf54a1e6d269c7b4aa0ac7bb8b04fd2cea3221a1d058ff33cb683b2aea3e9","src/init_tracker/mod.rs":"0867f79f83555390d0982d1dc6dcf0d4340e10cb89aa633d3c3ecc45deb3c78c","src/init_tracker/texture.rs":"37b6584aaca11c407d91f77002dcbb48d8a4876e27edd1b71b7929ef966f901d","src/instance.rs":"37b1d19ebbc03642368c0ef668f5f897a3cac8a6cccc553b50f3de5c800fde8b","src/lib.rs":"71d42899594be62c2e7074618e03f3639b5ef510b42d6dde660aaa4d5672691e","src/pipeline.rs":"4741c36fad7dedb856c3254f7869cad68dcf143aaac280f7f5920009b13c899a","src/present.rs":"a81f62ca967825f777a5f0d32defb2febb8793406c527d08c6ab0e129df5a81a","src/registry.rs":"4098413de7f48e9ff15d0246793be47a0d54c95b4c8594baf9fafd222a90ba84","src/resource.rs":"f140a1071d03dccae9859047c063dcd289e653352d635082dba76ef37a6ca4c3","src/storage.rs":"11ccae01252ae68727a256e5db6826f74973cfc753a18dedf7fabf8aef5596cc","src/track/buffer.rs":"dd6f632c6f31b15807148d705c516a8a1a8d72d02b137dd3b9d7c939447917cb","src/track/metadata.rs":"a80bd086ce825f7484ce6318a586c482d06fea0efc9c76bfa0124e480cc8b75e","src/track/mod.rs":"42b791d9a41eb6e62f6d79cae7abb5ab523eeb9e6030b0f95bbb0e26d56ad0ec","src/track/range.rs":"5bbfed6e103b3234d9de8e4205702
2da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"1d786b5e9558672243ba7d913736561065ef2bd5c6105c935e982486d10841f0","src/track/texture.rs":"7d60dc81ba7f7e2c2819525b90e6e6c7760cb0920e36aeefe98e76cedd49d26e","src/validation.rs":"8b4db473ef01d3603f45eac6bf613874624755bc2f85972720ca94db7b556753"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"bde42e04a69f912ac871368e202df8af116ea0ef010a2f08a62388b6985ca61f","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/any_surface.rs":"1afed4e5e2cc1726c9887bfbf8805f9141f859615c1eaf90f4ef3e49850caf06","src/binding_model.rs":"d7add33189aa453ad19acd8ac38465f44e233d0e8da42be5a3aadfc50a87a491","src/command/bind.rs":"85bbab812222f9bc11893059304cac850616818b00857fadac4885b978e4cfe2","src/command/bundle.rs":"7836b3740ad32168fdfb4241dbc91839d695c019abd3c38e3decec332b7e82c2","src/command/clear.rs":"65499812d269e23efb12a12a455f58f52f3014f8db224d02951b7331638f6ad4","src/command/compute.rs":"296864d4f9e9a98f368d64910146480e38e2f895eee98a97d947dd593033f87c","src/command/draw.rs":"3687cbde422a29f28c1c3d17e132d912b3b4b2bcc98efca68d1ee0d563a5bf56","src/command/memory_init.rs":"ffe5c301f19a17285523ee8fd5e7bf5abd5e50e9a3716f5713ac99ab135d9f5e","src/command/mod.rs":"e7c5e83a1b6398e68279b10910f426d7dde1ce3c800752bb88688b37f69b7872","src/command/query.rs":"f31db3f1282109baa59e6dcd7b2e674c7858a2c64f58fc6eb3a4d0c546935af5","src/command/render.rs":"6553e77c2c5a6cdf496e1316dc67071247ebf118bc238bbf2222f880a458bbf6","src/command/transfer.rs":"49dd1261e3ad1effc4ebdad1bc3061f4330f991b8291df75591a1e2bc5eaa257","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"2cb2be0fd078d65039692d309d8688cf4a02fb768579cf22c93cfa514d20ad7f","src/device/global.rs":"3be88717a7d164ea73842f90b93c359f8ea06bbae253af9a195734640d5a830d","src/device/life.rs":"6c2b2e7f98f71231c97f19cf19c4d8ff254ee9862a8422f973270656658aed96","src/device/mod.rs":"9bae0d30eaf51af5545eea10d5c8af6ca3ced2518d24c42880ec3c8f1f7664b2","src/device/queue.rs":"aec77faebaa750f843f2f04ea9a2021598098783f16f95e739bc64fabcc29619","src/device/resource.rs":"0e707723d58cecfb402fe35eb9944278de96a95496bd2e62b50a71ef3e44d079","src/device/trace.rs":"21408dfd2c99e3ce36a77d08ba86cf52f32bb376ed82690bbbf74937bfd42cbe","src/error.rs":"32680e922acfb1f1d6842177179768d365c575a8baa402da9d5a43a2357b0dbf","src/global.rs":"fd2a7995bdb64f1186fd71c4f6b59f34c543ee695912b1fad7931c88ec024584","src/hal_api.rs":"bb380df266fa9754c93d55c24b1a3535629710eb04bc6858a40c38a5f02aae68","src/hub.rs":"d9435f5b12f47e0b57599dce1d38e6eb4ef2477ab634806cfccefa4c1541f87b","src/id.rs":"4684c40d56ad8f49f36455ea84f2901df587fc3574274ac132b8236ece6926a3","src/identity.rs":"0701f6f41e754dde2bebc567a87c25b353dfab40b79a322990dbfa477739ab8c","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"0867f79f83555390d0982d1dc6dcf0d4340e10cb89aa633d3c3ecc45deb3c78c","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"643ce09d9a623f00e66e1eb50b2facc4d072f4149c9953343c1b0e8c4a5f6915","src/lib.rs":"23ac375edd02c7808ccb901b392bb34960339027b2e068b9009f89d413946515","src/pipeline.rs":"e09adc811a7553daa4d826fd6001be14a7d21222fc8ba76040d4fd5c0f017787","src/present.rs":"5a15e583ee2b4c1afcf596c8429437a50e9fd1e17f8fbf4fafb482928940815e","src/registry.rs":"52a52b0ed41abbed3c6
a84f0fb01c5f45c75953bacaa7a9b9bdb95a8825eb573","src/resource.rs":"14a3fedcf422688dc484c91abc5b2522c388fd5587c61e11a4b0bd882284fb24","src/storage.rs":"343811d28ed0926ef2d7f116d5ad8b4b6b6f530d7dfb5b9afbb90f2bb3ccfbc1","src/track/buffer.rs":"2e37fdae4e3b81f4e9d4bc35d71849ce82846afbb55c3de75c5db101c888e216","src/track/metadata.rs":"8ef7af1d0152a3a8d706402a416c2e74c9282354cad055d2c0c527486cb3019e","src/track/mod.rs":"028b04676f8a42a1f67bb9f6ffb809f56fa6349596b52561bce5b78c176e51e7","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"57f13386f0829cd6e1a894e454f69929eea7494565e530ed9f80bd7d50a4ba2d","src/track/texture.rs":"ea8ba2089e72401dcd37738d0583c411ec100a7436626f738d8d81bc188cbd99","src/validation.rs":"8b4db473ef01d3603f45eac6bf613874624755bc2f85972720ca94db7b556753"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-core/Cargo.toml b/third_party/rust/wgpu-core/Cargo.toml index 563cb4ca8b13d..206e1304e18d8 100644 --- a/third_party/rust/wgpu-core/Cargo.toml +++ b/third_party/rust/wgpu-core/Cargo.toml @@ -56,11 +56,7 @@ package = "wgpu-hal" [dependencies.naga] version = "0.14.0" path = "../naga" -features = [ - "clone", - "span", - "validate", -] +features = ["clone"] [dependencies.profiling] version = "1" diff --git a/third_party/rust/wgpu-core/src/any_surface.rs b/third_party/rust/wgpu-core/src/any_surface.rs new file mode 100644 index 0000000000000..757e5c152b864 --- /dev/null +++ b/third_party/rust/wgpu-core/src/any_surface.rs @@ -0,0 +1,112 @@ +use wgt::Backend; + +/// The `AnySurface` type: a `Arc` of a `HalSurface` for any backend `A`. +use crate::hal_api::HalApi; +use crate::instance::HalSurface; + +use std::any::Any; +use std::fmt; +use std::sync::Arc; + +/// A `Arc` of a `HalSurface`, for any backend `A`. +/// +/// Any `AnySurface` is just like an `Arc>`, except that the +/// `A` type parameter is erased. To access the `Surface`, you must +/// downcast to a particular backend with the \[`downcast_ref`\] or +/// \[`take`\] methods. +pub struct AnySurface(Arc); + +impl AnySurface { + /// Return an `AnySurface` that holds an owning `Arc` to `HalSurface`. + pub fn new(surface: HalSurface) -> AnySurface { + AnySurface(Arc::new(surface)) + } + + pub fn backend(&self) -> Backend { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + if self.downcast_ref::().is_some() { + return Backend::Vulkan; + } + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + if self.downcast_ref::().is_some() { + return Backend::Metal; + } + #[cfg(all(feature = "dx12", windows))] + if self.downcast_ref::().is_some() { + return Backend::Dx12; + } + #[cfg(all(feature = "dx11", windows))] + if self.downcast_ref::().is_some() { + return Backend::Dx11; + } + #[cfg(feature = "gles")] + if self.downcast_ref::().is_some() { + return Backend::Gl; + } + Backend::Empty + } + + /// If `self` is an `Arc>`, return a reference to the + /// HalSurface. + pub fn downcast_ref(&self) -> Option<&HalSurface> { + self.0.downcast_ref::>() + } + + /// If `self` is an `Arc>`, returns that. + pub fn take(self) -> Option>> { + // `Arc::downcast` returns `Arc`, but requires that `T` be `Sync` and + // `Send`, and this is not the case for `HalSurface` in wasm builds. + // + // But as far as I can see, `Arc::downcast` has no particular reason to + // require that `T` be `Sync` and `Send`; the steps used here are sound. 
+ if (self.0).is::>() { + // Turn the `Arc`, which is a pointer to an `ArcInner` struct, into + // a pointer to the `ArcInner`'s `data` field. Carry along the + // vtable from the original `Arc`. + let raw_erased: *const (dyn Any + 'static) = Arc::into_raw(self.0); + // Remove the vtable, and supply the concrete type of the `data`. + let raw_typed: *const HalSurface = raw_erased.cast::>(); + // Convert the pointer to the `data` field back into a pointer to + // the `ArcInner`, and restore reference-counting behavior. + let arc_typed: Arc> = unsafe { + // Safety: + // - We checked that the `dyn Any` was indeed a `HalSurface` above. + // - We're calling `Arc::from_raw` on the same pointer returned + // by `Arc::into_raw`, except that we stripped off the vtable + // pointer. + // - The pointer must still be live, because we've borrowed `self`, + // which holds another reference to it. + // - The format of a `ArcInner` must be the same as + // that of an `ArcInner>`, or else `AnyHalSurface::new` + // wouldn't be possible. + Arc::from_raw(raw_typed) + }; + Some(arc_typed) + } else { + None + } + } +} + +impl fmt::Debug for AnySurface { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AnySurface") + } +} + +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Send for AnySurface {} +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Sync for AnySurface {} diff --git a/third_party/rust/wgpu-core/src/binding_model.rs b/third_party/rust/wgpu-core/src/binding_model.rs index efdd79e059b9f..5f973da66237a 100644 --- a/third_party/rust/wgpu-core/src/binding_model.rs +++ b/third_party/rust/wgpu-core/src/binding_model.rs @@ -1,23 +1,27 @@ use crate::{ - device::{DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT}, + device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT}, error::{ErrorFormatter, PrettyError}, hal_api::HalApi, - id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureId, TextureViewId, Valid}, + id::{ + BindGroupId, BindGroupLayoutId, BufferId, PipelineLayoutId, SamplerId, TextureId, + TextureViewId, + }, init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, - resource::Resource, + resource::{Resource, ResourceInfo, ResourceType}, track::{BindGroupStates, UsageConflict}, validation::{MissingBufferUsageError, MissingTextureUsageError}, - FastHashMap, Label, LifeGuard, MultiRefCount, Stored, + FastHashMap, Label, }; use arrayvec::ArrayVec; +use parking_lot::RwLock; #[cfg(feature = "replay")] use serde::Deserialize; #[cfg(feature = "trace")] use serde::Serialize; -use std::{borrow::Cow, ops::Range}; +use std::{borrow::Cow, ops::Range, sync::Arc}; use thiserror::Error; @@ -446,96 +450,53 @@ pub type BindGroupLayouts = crate::storage::Storage, BindG /// - produced bind groups /// - produced pipeline layouts /// - pipelines with implicit layouts -pub struct BindGroupLayout { - pub(crate) device_id: Stored, - pub(crate) multi_ref_count: MultiRefCount, - // When a layout created and there already exists a compatible layout the new layout - // keeps a reference to the older compatible one. In some places we substitute the - // bind group layout id with its compatible sibling. - // Since this substitution can come at a cost, it is skipped when wgpu-core generates - // its own resource IDs. 
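`AnySurface::take`, completed above, hand-rolls `Arc::downcast` precisely to dodge the `T: Send + Sync` bound the standard method imposes. Here is the same pointer dance in isolation, with a deliberately non-`Send` stand-in type; everything except the `Arc::into_raw`/`cast`/`Arc::from_raw` mechanics is hypothetical:

```rust
use std::any::Any;
use std::marker::PhantomData;
use std::sync::Arc;

/// Hypothetical stand-in for a backend surface; the raw-pointer marker
/// makes it `!Send + !Sync`, like `HalSurface` in wasm builds.
struct Surface {
    name: String,
    _not_send: PhantomData<*const ()>,
}

/// `Arc::downcast` without the `T: Send + Sync` bound, using the same
/// steps as `AnySurface::take`.
fn downcast_arc<T: Any>(erased: Arc<dyn Any>) -> Option<Arc<T>> {
    if erased.is::<T>() {
        // `Arc::into_raw` points at the data field of the `ArcInner`;
        // casting drops the vtable but keeps the same address.
        let raw: *const dyn Any = Arc::into_raw(erased);
        let typed: *const T = raw.cast::<T>();
        // SAFETY: same allocation, and `is::<T>()` proved the concrete type.
        Some(unsafe { Arc::from_raw(typed) })
    } else {
        None
    }
}

fn main() {
    let erased: Arc<dyn Any> = Arc::new(Surface {
        name: "primary".into(),
        _not_send: PhantomData,
    });
    let surface = downcast_arc::<Surface>(erased).expect("type matches");
    assert_eq!(surface.name, "primary");
}
```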
- pub(crate) inner: BglOrDuplicate, -} - -pub(crate) enum BglOrDuplicate { - Inner(BindGroupLayoutInner), - Duplicate(Valid), -} - -pub struct BindGroupLayoutInner { - pub(crate) raw: A::BindGroupLayout, +#[derive(Debug)] +pub struct BindGroupLayout { + pub(crate) raw: Option, + pub(crate) device: Arc>, pub(crate) entries: BindEntryMap, #[allow(unused)] pub(crate) dynamic_count: usize, pub(crate) count_validator: BindingTypeMaxCountValidator, + pub(crate) info: ResourceInfo, #[cfg(debug_assertions)] pub(crate) label: String, } -impl BindGroupLayout { - #[track_caller] - pub(crate) fn assume_deduplicated(&self) -> &BindGroupLayoutInner { - self.as_inner().unwrap() - } - - pub(crate) fn as_inner(&self) -> Option<&BindGroupLayoutInner> { - match self.inner { - BglOrDuplicate::Inner(ref inner) => Some(inner), - BglOrDuplicate::Duplicate(_) => None, +impl Drop for BindGroupLayout { + fn drop(&mut self) { + log::info!("Destroying BindGroupLayout {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_bind_group_layout(raw); + } } } +} - pub(crate) fn into_inner(self) -> Option> { - match self.inner { - BglOrDuplicate::Inner(inner) => Some(inner), - BglOrDuplicate::Duplicate(_) => None, - } - } +impl Resource for BindGroupLayout { + const TYPE: ResourceType = "BindGroupLayout"; - pub(crate) fn as_duplicate(&self) -> Option> { - match self.inner { - BglOrDuplicate::Duplicate(id) => Some(id), - BglOrDuplicate::Inner(_) => None, - } + fn as_info(&self) -> &ResourceInfo { + &self.info } -} -impl Resource for BindGroupLayout { - const TYPE: &'static str = "BindGroupLayout"; - - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } - fn label(&self) -> &str { + fn label(&self) -> String { #[cfg(debug_assertions)] - return self.as_inner().map_or("", |inner| &inner.label); + return self.label.clone(); #[cfg(not(debug_assertions))] - return ""; + return String::new(); } } - -// If a bindgroup needs to be substitued with its compatible equivalent, return the latter. 
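The new `Drop` impl above is the arcanization ownership story in miniature: the raw hal object now lives in an `Option` inside the resource, next to an `Arc` of its device, so the destructor can move it out of `&mut self` and hand it to the device by value. A self-contained sketch of that pattern, with stand-in types in place of the hal traits:

```rust
use std::sync::Arc;

/// Stand-ins for the hal device and the raw backend object (hypothetical).
struct Device;
struct RawBindGroupLayout;

impl Device {
    fn destroy_bind_group_layout(&self, _raw: RawBindGroupLayout) {
        println!("raw layout destroyed");
    }
}

/// Arcanized resource: an Arc to its device plus the raw object in an
/// Option, so Drop can take it out of `&mut self`.
struct BindGroupLayout {
    raw: Option<RawBindGroupLayout>,
    device: Arc<Device>,
}

impl BindGroupLayout {
    /// Mirrors the new `raw()` accessor: valid until the resource drops.
    fn raw(&self) -> &RawBindGroupLayout {
        self.raw.as_ref().unwrap()
    }
}

impl Drop for BindGroupLayout {
    fn drop(&mut self) {
        // `Option::take` hands the raw object to the device by value;
        // whichever Arc clone dies last runs this exactly once.
        if let Some(raw) = self.raw.take() {
            self.device.destroy_bind_group_layout(raw);
        }
    }
}

fn main() {
    let device = Arc::new(Device);
    let layout = BindGroupLayout { raw: Some(RawBindGroupLayout), device };
    let _ = layout.raw();
    drop(layout); // prints "raw layout destroyed"
}
```

The same raw-in-`Option` shape repeats for `PipelineLayout` and `BindGroup` in the hunks that follow.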
-pub(crate) fn try_get_bind_group_layout( - layouts: &BindGroupLayouts, - id: BindGroupLayoutId, -) -> Option<&BindGroupLayout> { - let layout = layouts.get(id).ok()?; - if let BglOrDuplicate::Duplicate(original_id) = layout.inner { - return Some(&layouts[original_id]); +impl BindGroupLayout { + pub(crate) fn raw(&self) -> &A::BindGroupLayout { + self.raw.as_ref().unwrap() } - - Some(layout) -} - -pub(crate) fn get_bind_group_layout( - layouts: &BindGroupLayouts, - id: Valid, -) -> (Valid, &BindGroupLayout) { - let layout = &layouts[id]; - layout - .as_duplicate() - .map_or((id, layout), |deduped| (deduped, &layouts[deduped])) } #[derive(Clone, Debug, Error)] @@ -635,15 +596,30 @@ pub struct PipelineLayoutDescriptor<'a> { } #[derive(Debug)] -pub struct PipelineLayout { - pub(crate) raw: A::PipelineLayout, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, - pub(crate) bind_group_layout_ids: ArrayVec, { hal::MAX_BIND_GROUPS }>, +pub struct PipelineLayout { + pub(crate) raw: Option, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, + pub(crate) bind_group_layouts: ArrayVec>, { hal::MAX_BIND_GROUPS }>, pub(crate) push_constant_ranges: ArrayVec, } -impl PipelineLayout { +impl Drop for PipelineLayout { + fn drop(&mut self) { + log::info!("Destroying PipelineLayout {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_pipeline_layout(raw); + } + } + } +} + +impl PipelineLayout { + pub(crate) fn raw(&self) -> &A::PipelineLayout { + self.raw.as_ref().unwrap() + } /// Validate push constants match up with expected ranges. pub(crate) fn validate_push_constant_ranges( &self, @@ -723,11 +699,15 @@ impl PipelineLayout { } } -impl Resource for PipelineLayout { - const TYPE: &'static str = "PipelineLayout"; +impl Resource for PipelineLayout { + const TYPE: ResourceType = "PipelineLayout"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } @@ -830,37 +810,54 @@ pub(crate) fn buffer_binding_type_alignment( } } +#[derive(Debug)] pub struct BindGroup { - pub(crate) raw: A::BindGroup, - pub(crate) device_id: Stored, - pub(crate) layout_id: Valid, - pub(crate) life_guard: LifeGuard, + pub(crate) raw: Option, + pub(crate) device: Arc>, + pub(crate) layout: Arc>, + pub(crate) info: ResourceInfo, pub(crate) used: BindGroupStates, - pub(crate) used_buffer_ranges: Vec, - pub(crate) used_texture_ranges: Vec, - pub(crate) dynamic_binding_info: Vec, + pub(crate) used_buffer_ranges: RwLock>>, + pub(crate) used_texture_ranges: RwLock>>, + pub(crate) dynamic_binding_info: RwLock>, /// Actual binding sizes for buffers that don't have `min_binding_size` /// specified in BGL. Listed in the order of iteration of `BGL.entries`. 
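`BindGroup`'s bookkeeping vectors above are now wrapped in `parking_lot::RwLock`, because the group is shared as an `Arc<BindGroup<A>>` instead of being borrowed out of a storage token. A sketch of that interior-mutability pattern; the `DynamicBindingInfo` stand-in is made up, while `RwLock::read`/`write` are the real `parking_lot` API that wgpu-core already depends on:

```rust
use parking_lot::RwLock;
use std::sync::Arc;

/// Made-up stand-in for wgpu-core's per-binding bookkeeping.
struct DynamicBindingInfo {
    binding_idx: u32,
}

/// Shared, logically-immutable resource whose mutable metadata hides
/// behind an RwLock, mirroring the new BindGroup fields.
struct BindGroup {
    dynamic_binding_info: RwLock<Vec<DynamicBindingInfo>>,
}

fn main() {
    let group = Arc::new(BindGroup {
        dynamic_binding_info: RwLock::new(vec![DynamicBindingInfo { binding_idx: 0 }]),
    });

    // Validation paths clone the Arc and take the shared lock; readers
    // never block each other, which matters once command recording is
    // no longer serialized through the hub's storage tokens.
    let reader = Arc::clone(&group);
    assert_eq!(reader.dynamic_binding_info.read().len(), 1);

    // Recording new state takes the exclusive lock.
    group
        .dynamic_binding_info
        .write()
        .push(DynamicBindingInfo { binding_idx: 1 });
    assert_eq!(group.dynamic_binding_info.read()[1].binding_idx, 1);
}
```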
pub(crate) late_buffer_binding_sizes: Vec, } +impl Drop for BindGroup { + fn drop(&mut self) { + log::info!("Destroying BindGroup {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_bind_group(raw); + } + } + } +} + impl BindGroup { + pub(crate) fn raw(&self) -> &A::BindGroup { + self.raw.as_ref().unwrap() + } pub(crate) fn validate_dynamic_bindings( &self, bind_group_index: u32, offsets: &[wgt::DynamicOffset], limits: &wgt::Limits, ) -> Result<(), BindError> { - if self.dynamic_binding_info.len() != offsets.len() { + if self.dynamic_binding_info.read().len() != offsets.len() { return Err(BindError::MismatchedDynamicOffsetCount { group: bind_group_index, - expected: self.dynamic_binding_info.len(), + expected: self.dynamic_binding_info.read().len(), actual: offsets.len(), }); } for (idx, (info, &offset)) in self .dynamic_binding_info + .read() .iter() .zip(offsets.iter()) .enumerate() @@ -894,11 +891,15 @@ impl BindGroup { } } -impl Resource for BindGroup { - const TYPE: &'static str = "BindGroup"; +impl Resource for BindGroup { + const TYPE: ResourceType = "BindGroup"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } diff --git a/third_party/rust/wgpu-core/src/command/bind.rs b/third_party/rust/wgpu-core/src/command/bind.rs index 1cdec465e26c8..66b95a6df97c3 100644 --- a/third_party/rust/wgpu-core/src/command/bind.rs +++ b/third_party/rust/wgpu-core/src/command/bind.rs @@ -1,13 +1,12 @@ +use std::sync::Arc; + use crate::{ - binding_model::{ - BindGroup, BindGroupLayouts, LateMinBufferBindingSizeMismatch, PipelineLayout, - }, + binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout}, device::SHADER_STAGE_COUNT, hal_api::HalApi, - id::{BindGroupId, PipelineLayoutId, Valid}, + id::BindGroupId, pipeline::LateSizedBufferGroup, - storage::Storage, - Stored, + resource::Resource, }; use arrayvec::ArrayVec; @@ -15,73 +14,85 @@ use arrayvec::ArrayVec; type BindGroupMask = u8; mod compat { - use crate::{ - binding_model::BindGroupLayouts, - id::{BindGroupLayoutId, Valid}, - }; - use std::ops::Range; + use arrayvec::ArrayVec; - #[derive(Debug, Default)] - struct Entry { - assigned: Option>, - expected: Option>, + use crate::{binding_model::BindGroupLayout, hal_api::HalApi, resource::Resource}; + use std::{ops::Range, sync::Arc}; + + #[derive(Debug, Clone)] + struct Entry { + assigned: Option>>, + expected: Option>>, } - impl Entry { + impl Entry { + fn empty() -> Self { + Self { + assigned: None, + expected: None, + } + } fn is_active(&self) -> bool { self.assigned.is_some() && self.expected.is_some() } - fn is_valid(&self, bind_group_layouts: &BindGroupLayouts) -> bool { - if self.expected.is_none() || self.expected == self.assigned { + fn is_valid(&self) -> bool { + if self.expected.is_none() { return true; } - - if let Some(id) = self.assigned { - return bind_group_layouts[id].as_duplicate() == self.expected; + if let Some(expected_bgl) = self.expected.as_ref() { + if let Some(assigned_bgl) = self.assigned.as_ref() { + if expected_bgl.is_equal(assigned_bgl) { + return true; + } + } } - false } + + fn is_incompatible(&self) -> bool { + self.expected.is_none() || !self.is_valid() + } } - #[derive(Debug)] - pub(crate) struct BoundBindGroupLayouts { - entries: [Entry; hal::MAX_BIND_GROUPS], + #[derive(Debug, Default)] + pub(crate) struct BoundBindGroupLayouts { + entries: 
ArrayVec, { hal::MAX_BIND_GROUPS }>, } - impl BoundBindGroupLayouts { + impl BoundBindGroupLayouts { pub fn new() -> Self { Self { - entries: Default::default(), + entries: (0..hal::MAX_BIND_GROUPS).map(|_| Entry::empty()).collect(), } } - fn make_range(&self, start_index: usize) -> Range { // find first incompatible entry let end = self .entries .iter() - .position(|e| e.expected.is_none() || e.assigned != e.expected) + .position(|e| e.is_incompatible()) .unwrap_or(self.entries.len()); start_index..end.max(start_index) } pub fn update_expectations( &mut self, - expectations: &[Valid], + expectations: &[Arc>], ) -> Range { let start_index = self .entries .iter() .zip(expectations) - .position(|(e, &expect)| e.expected != Some(expect)) + .position(|(e, expect)| { + e.expected.is_none() || !e.expected.as_ref().unwrap().is_equal(expect) + }) .unwrap_or(expectations.len()); - for (e, &expect) in self.entries[start_index..] + for (e, expect) in self.entries[start_index..] .iter_mut() .zip(expectations[start_index..].iter()) { - e.expected = Some(expect); + e.expected = Some(expect.clone()); } for e in self.entries[expectations.len()..].iter_mut() { e.expected = None; @@ -89,7 +100,7 @@ mod compat { self.make_range(start_index) } - pub fn assign(&mut self, index: usize, value: Valid) -> Range { + pub fn assign(&mut self, index: usize, value: Arc>) -> Range { self.entries[index].assigned = Some(value); self.make_range(index) } @@ -101,12 +112,9 @@ mod compat { .filter_map(|(i, e)| if e.is_active() { Some(i) } else { None }) } - pub fn invalid_mask( - &self, - bind_group_layouts: &BindGroupLayouts, - ) -> super::BindGroupMask { + pub fn invalid_mask(&self) -> super::BindGroupMask { self.entries.iter().enumerate().fold(0, |mask, (i, entry)| { - if entry.is_valid(bind_group_layouts) { + if entry.is_valid() { mask } else { mask | 1u8 << i @@ -114,40 +122,6 @@ mod compat { }) } } - - #[test] - fn test_compatibility() { - fn id(val: u32) -> Valid { - BindGroupLayoutId::dummy(val) - } - - let mut man = BoundBindGroupLayouts::new(); - man.entries[0] = Entry { - expected: Some(id(3)), - assigned: Some(id(2)), - }; - man.entries[1] = Entry { - expected: Some(id(1)), - assigned: Some(id(1)), - }; - man.entries[2] = Entry { - expected: Some(id(4)), - assigned: Some(id(5)), - }; - // check that we rebind [1] after [0] became compatible - assert_eq!(man.assign(0, id(3)), 0..2); - // check that nothing is rebound - assert_eq!(man.update_expectations(&[id(3), id(2)]), 1..1); - // check that [1] and [2] are rebound on expectations change - assert_eq!(man.update_expectations(&[id(3), id(1), id(5)]), 1..3); - // reset the first two bindings - assert_eq!(man.update_expectations(&[id(4), id(6), id(5)]), 0..0); - // check that nothing is rebound, even if there is a match, - // since earlier binding is incompatible. 
- assert_eq!(man.assign(1, id(6)), 1..1); - // finally, bind everything - assert_eq!(man.assign(0, id(4)), 0..3); - } } #[derive(Debug)] @@ -156,9 +130,9 @@ struct LateBufferBinding { bound_size: wgt::BufferAddress, } -#[derive(Debug, Default)] -pub(super) struct EntryPayload { - pub(super) group_id: Option>, +#[derive(Debug)] +pub(super) struct EntryPayload { + pub(super) group: Option>>, pub(super) dynamic_offsets: Vec, late_buffer_bindings: Vec, /// Since `LateBufferBinding` may contain information about the bindings @@ -166,49 +140,57 @@ pub(super) struct EntryPayload { pub(super) late_bindings_effective_count: usize, } -impl EntryPayload { +impl Default for EntryPayload { + fn default() -> Self { + Self { + group: None, + dynamic_offsets: Default::default(), + late_buffer_bindings: Default::default(), + late_bindings_effective_count: Default::default(), + } + } +} + +impl EntryPayload { fn reset(&mut self) { - self.group_id = None; + self.group = None; self.dynamic_offsets.clear(); self.late_buffer_bindings.clear(); self.late_bindings_effective_count = 0; } } -#[derive(Debug)] -pub(super) struct Binder { - pub(super) pipeline_layout_id: Option>, //TODO: strongly `Stored` - manager: compat::BoundBindGroupLayouts, - payloads: [EntryPayload; hal::MAX_BIND_GROUPS], +#[derive(Debug, Default)] +pub(super) struct Binder { + pub(super) pipeline_layout: Option>>, + manager: compat::BoundBindGroupLayouts, + payloads: [EntryPayload; hal::MAX_BIND_GROUPS], } -impl Binder { +impl Binder { pub(super) fn new() -> Self { Self { - pipeline_layout_id: None, + pipeline_layout: None, manager: compat::BoundBindGroupLayouts::new(), payloads: Default::default(), } } - pub(super) fn reset(&mut self) { - self.pipeline_layout_id = None; + self.pipeline_layout = None; self.manager = compat::BoundBindGroupLayouts::new(); for payload in self.payloads.iter_mut() { payload.reset(); } } - pub(super) fn change_pipeline_layout<'a, A: HalApi>( + pub(super) fn change_pipeline_layout<'a>( &'a mut self, - guard: &Storage, PipelineLayoutId>, - new_id: Valid, + new: &Arc>, late_sized_buffer_groups: &[LateSizedBufferGroup], - ) -> (usize, &'a [EntryPayload]) { - let old_id_opt = self.pipeline_layout_id.replace(new_id); - let new = &guard[new_id]; + ) -> (usize, &'a [EntryPayload]) { + let old_id_opt = self.pipeline_layout.replace(new.clone()); - let mut bind_range = self.manager.update_expectations(&new.bind_group_layout_ids); + let mut bind_range = self.manager.update_expectations(&new.bind_group_layouts); // Update the buffer binding sizes that are required by shaders. 
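The deleted `test_compatibility` above documented how `BoundBindGroupLayouts` computes re-bind ranges; arcanization keeps those semantics and only swaps raw-id equality for `is_equal` on `Arc<BindGroupLayout>`. The range logic in isolation, with plain integers standing in for layouts:

```rust
use std::ops::Range;

/// Integer stand-ins play the role of bind group layouts; the real code
/// now compares `Arc<BindGroupLayout>` values with `is_equal`.
#[derive(Clone, Default)]
struct Entry {
    assigned: Option<u32>,
    expected: Option<u32>,
}

struct BoundLayouts {
    entries: Vec<Entry>,
}

impl BoundLayouts {
    fn new(n: usize) -> Self {
        Self { entries: vec![Entry::default(); n] }
    }

    /// Everything from `start_index` up to the first incompatible slot
    /// must be re-bound.
    fn make_range(&self, start_index: usize) -> Range<usize> {
        let end = self
            .entries
            .iter()
            .position(|e| e.expected.is_none() || e.assigned != e.expected)
            .unwrap_or(self.entries.len());
        start_index..end.max(start_index)
    }

    fn assign(&mut self, index: usize, value: u32) -> Range<usize> {
        self.entries[index].assigned = Some(value);
        self.make_range(index)
    }
}

fn main() {
    let mut man = BoundLayouts::new(4);
    man.entries[0] = Entry { expected: Some(3), assigned: Some(2) };
    man.entries[1] = Entry { expected: Some(1), assigned: Some(1) };
    // Slot 1 was already compatible, so repairing slot 0 re-binds 0..2.
    assert_eq!(man.assign(0, 3), 0..2);
    // A match at slot 3 re-binds nothing while slot 2 has no expectation.
    assert_eq!(man.assign(3, 5), 3..3);
}
```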
for (payload, late_group) in self.payloads.iter_mut().zip(late_sized_buffer_groups) { @@ -232,8 +214,7 @@ impl Binder { } } - if let Some(old_id) = old_id_opt { - let old = &guard[old_id]; + if let Some(old) = old_id_opt { // root constants are the base compatibility property if old.push_constant_ranges != new.push_constant_ranges { bind_range.start = 0; @@ -243,21 +224,18 @@ impl Binder { (bind_range.start, &self.payloads[bind_range]) } - pub(super) fn assign_group<'a, A: HalApi>( + pub(super) fn assign_group<'a>( &'a mut self, index: usize, - bind_group_id: Valid, - bind_group: &BindGroup, + bind_group: &Arc>, offsets: &[wgt::DynamicOffset], - ) -> &'a [EntryPayload] { + ) -> &'a [EntryPayload] { + let bind_group_id = bind_group.as_info().id(); log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id); - debug_assert_eq!(A::VARIANT, bind_group_id.0.backend()); + debug_assert_eq!(A::VARIANT, bind_group_id.backend()); let payload = &mut self.payloads[index]; - payload.group_id = Some(Stored { - value: bind_group_id, - ref_count: bind_group.life_guard.add_ref(), - }); + payload.group = Some(bind_group.clone()); payload.dynamic_offsets.clear(); payload.dynamic_offsets.extend_from_slice(offsets); @@ -281,22 +259,19 @@ impl Binder { } } - let bind_range = self.manager.assign(index, bind_group.layout_id); + let bind_range = self.manager.assign(index, bind_group.layout.clone()); &self.payloads[bind_range] } - pub(super) fn list_active(&self) -> impl Iterator> + '_ { + pub(super) fn list_active(&self) -> impl Iterator + '_ { let payloads = &self.payloads; self.manager .list_active() - .map(move |index| payloads[index].group_id.as_ref().unwrap().value) + .map(move |index| payloads[index].group.as_ref().unwrap().as_info().id()) } - pub(super) fn invalid_mask( - &self, - bind_group_layouts: &BindGroupLayouts, - ) -> BindGroupMask { - self.manager.invalid_mask(bind_group_layouts) + pub(super) fn invalid_mask(&self) -> BindGroupMask { + self.manager.invalid_mask() } /// Scan active buffer bindings corresponding to layouts without `min_binding_size` specified. diff --git a/third_party/rust/wgpu-core/src/command/bundle.rs b/third_party/rust/wgpu-core/src/command/bundle.rs index 54416da95ec0d..82648e0e1c66e 100644 --- a/third_party/rust/wgpu-core/src/command/bundle.rs +++ b/third_party/rust/wgpu-core/src/command/bundle.rs @@ -79,7 +79,7 @@ index format changes. 
#![allow(clippy::reversed_empty_ranges)] use crate::{ - binding_model::{self, buffer_binding_type_alignment}, + binding_model::{buffer_binding_type_alignment, BindGroup, BindGroupLayout, PipelineLayout}, command::{ BasePass, BindGroupStateChange, ColorAttachmentError, DrawError, MapPassErr, PassErrorScope, RenderCommand, RenderCommandError, StateChange, @@ -91,19 +91,18 @@ use crate::{ }, error::{ErrorFormatter, PrettyError}, hal_api::HalApi, - hub::{Hub, Token}, - id, - identity::GlobalIdentityHandlerFactory, + hub::Hub, + id::{self, RenderBundleId}, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, - pipeline::{self, PipelineFlags}, - resource::{self, Resource}, - storage::Storage, + pipeline::{self, PipelineFlags, RenderPipeline}, + resource::{Resource, ResourceInfo, ResourceType}, track::RenderBundleScope, validation::check_buffer_usage, - Label, LabelHelpers, LifeGuard, Stored, + Label, LabelHelpers, }; use arrayvec::ArrayVec; -use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range}; + +use std::{borrow::Cow, mem, num::NonZeroU32, ops::Range, sync::Arc}; use thiserror::Error; use hal::CommandEncoder as _; @@ -251,19 +250,17 @@ impl RenderBundleEncoder { /// and accumulate buffer and texture initialization actions. /// /// [`ExecuteBundle`]: RenderCommand::ExecuteBundle - pub(crate) fn finish( + pub(crate) fn finish( self, desc: &RenderBundleDescriptor, - device: &Device, - hub: &Hub, - token: &mut Token>, + device: &Arc>, + hub: &Hub, ) -> Result, RenderBundleError> { - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let bind_group_guard = hub.bind_groups.read(); + let pipeline_guard = hub.render_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); let mut state = State { trackers: RenderBundleScope::new( @@ -295,13 +292,14 @@ impl RenderBundleEncoder { } => { let scope = PassErrorScope::SetBindGroup(bind_group_id); - let bind_group: &binding_model::BindGroup = state + let bind_group = state .trackers .bind_groups + .write() .add_single(&*bind_group_guard, bind_group_id) .ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) .map_pass_err(scope)?; - self.check_valid_to_use(bind_group.device_id.value) + self.check_valid_to_use(bind_group.device.info.id()) .map_pass_err(scope)?; let max_bind_groups = device.limits.max_bind_groups; @@ -320,10 +318,10 @@ impl RenderBundleEncoder { next_dynamic_offset = offsets_range.end; let offsets = &base.dynamic_offsets[offsets_range.clone()]; - if bind_group.dynamic_binding_info.len() != offsets.len() { + if bind_group.dynamic_binding_info.read().len() != offsets.len() { return Err(RenderCommandError::InvalidDynamicOffsetCount { actual: offsets.len(), - expected: bind_group.dynamic_binding_info.len(), + expected: bind_group.dynamic_binding_info.read().len(), }) .map_pass_err(scope); } @@ -332,7 +330,7 @@ impl RenderBundleEncoder { for (offset, info) in offsets .iter() .map(|offset| *offset as wgt::BufferAddress) - .zip(bind_group.dynamic_binding_info.iter()) + .zip(bind_group.dynamic_binding_info.read().iter()) { let (alignment, limit_name) = 
buffer_binding_type_alignment(&device.limits, info.binding_type); @@ -344,14 +342,14 @@ impl RenderBundleEncoder { } } - buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges); - texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges); + buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges.read()); + texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges.read()); - state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets_range); + state.set_bind_group(index, bind_group_guard.get(bind_group_id).as_ref().unwrap(), &bind_group.layout, offsets_range); unsafe { state .trackers - .merge_bind_group(&*texture_guard, &bind_group.used) + .merge_bind_group(&bind_group.used) .map_pass_err(scope)? }; //Note: stateless trackers are not merged: the lifetime reference @@ -360,13 +358,14 @@ impl RenderBundleEncoder { RenderCommand::SetPipeline(pipeline_id) => { let scope = PassErrorScope::SetPipelineRender(pipeline_id); - let pipeline: &pipeline::RenderPipeline = state + let pipeline = state .trackers .render_pipelines + .write() .add_single(&*pipeline_guard, pipeline_id) .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; - self.check_valid_to_use(pipeline.device_id.value) + self.check_valid_to_use(pipeline.device.info.id()) .map_pass_err(scope)?; self.context @@ -383,8 +382,7 @@ impl RenderBundleEncoder { .map_pass_err(scope); } - let layout = &pipeline_layout_guard[pipeline.layout_id.value]; - let pipeline_state = PipelineState::new(pipeline_id, pipeline, layout); + let pipeline_state = PipelineState::new(pipeline); commands.push(command); @@ -393,7 +391,7 @@ impl RenderBundleEncoder { commands.extend(iter) } - state.invalidate_bind_groups(&pipeline_state, layout); + state.invalidate_bind_groups(&pipeline_state, &pipeline.layout); state.pipeline = Some(pipeline_state); } RenderCommand::SetIndexBuffer { @@ -403,12 +401,13 @@ impl RenderBundleEncoder { size, } => { let scope = PassErrorScope::SetIndexBuffer(buffer_id); - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX) .map_pass_err(scope)?; @@ -417,8 +416,8 @@ impl RenderBundleEncoder { Some(s) => offset + s.get(), None => buffer.size, }; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, )); @@ -431,12 +430,13 @@ impl RenderBundleEncoder { size, } => { let scope = PassErrorScope::SetVertexBuffer(buffer_id); - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX) .map_pass_err(scope)?; @@ -445,8 +445,8 @@ impl RenderBundleEncoder { Some(s) => offset + s.get(), None => buffer.size, }; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( - buffer_id, + 
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, )); @@ -461,10 +461,9 @@ impl RenderBundleEncoder { let scope = PassErrorScope::SetPushConstant; let end_offset = offset + size_bytes; - let pipeline = state.pipeline(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id]; + let pipeline_state = state.pipeline(scope)?; - pipeline_layout + pipeline_state.pipeline.layout .validate_push_constant_ranges(stages, offset, end_offset) .map_pass_err(scope)?; @@ -567,18 +566,19 @@ impl RenderBundleEncoder { let pipeline = state.pipeline(scope)?; let used_bind_groups = pipeline.used_bind_groups; - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT) .map_pass_err(scope)?; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( + buffer, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, )); @@ -605,18 +605,19 @@ impl RenderBundleEncoder { let pipeline = state.pipeline(scope)?; let used_bind_groups = pipeline.used_bind_groups; - let buffer: &resource::Buffer = state + let buffer = state .trackers .buffers + .write() .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) .map_pass_err(scope)?; - self.check_valid_to_use(buffer.device_id.value) + self.check_valid_to_use(buffer.device.info.id()) .map_pass_err(scope)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT) .map_pass_err(scope)?; - buffer_memory_init_actions.extend(buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action( + buffer, offset..(offset + mem::size_of::() as u64), MemoryInitKind::NeedsInitializedMemory, )); @@ -659,26 +660,20 @@ impl RenderBundleEncoder { }, is_depth_read_only: self.is_depth_read_only, is_stencil_read_only: self.is_stencil_read_only, - device_id: Stored { - value: id::Valid(self.parent_id), - ref_count: device.life_guard.add_ref(), - }, + device: device.clone(), used: state.trackers, buffer_memory_init_actions, texture_memory_init_actions, context: self.context, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + info: ResourceInfo::new(desc.label.borrow_or_default()), discard_hal_labels: device .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS), }) } - fn check_valid_to_use( - &self, - device_id: id::Valid, - ) -> Result<(), RenderBundleErrorInner> { - if device_id.0 != self.parent_id { + fn check_valid_to_use(&self, device_id: id::DeviceId) -> Result<(), RenderBundleErrorInner> { + if device_id != self.parent_id { return Err(RenderBundleErrorInner::NotValidToUse); } @@ -737,18 +732,19 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor>; //Note: here, `RenderBundle` is just wrapping a raw stream of render commands. // The plan is to back it by an actual Vulkan secondary buffer, D3D12 Bundle, // or Metal indirect command buffer. +#[derive(Debug)] pub struct RenderBundle { // Normalized command stream. It can be executed verbatim, // without re-binding anything on the pipeline change. 
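That comment is the key design point of bundles: all validation happens once in `finish`, so replay just walks the normalized stream. A toy model of such a replay loop, with simplified stand-ins for wgpu-core's command and encoder types; the real `execute`, a few hunks below, now takes only the raw encoder because the bundle's `RenderBundleScope` holds `Arc`s to every resource it touches:

```rust
/// Simplified stand-in for the normalized command stream.
enum Command {
    SetPipeline(u32),
    SetBindGroup { index: u32, group: u32 },
    Draw { vertices: u32 },
}

/// Stand-in for the hal command encoder.
struct Encoder;

impl Encoder {
    fn set_pipeline(&mut self, id: u32) {
        println!("pipeline {id}");
    }
    fn set_bind_group(&mut self, index: u32, group: u32) {
        println!("bind group {group} at slot {index}");
    }
    fn draw(&mut self, vertices: u32) {
        println!("draw {vertices}");
    }
}

/// Replay is a straight loop: every command was validated and normalized
/// when the bundle was finished, so nothing is re-checked here.
fn execute(encoder: &mut Encoder, stream: &[Command]) {
    for command in stream {
        match command {
            Command::SetPipeline(id) => encoder.set_pipeline(*id),
            Command::SetBindGroup { index, group } => {
                encoder.set_bind_group(*index, *group)
            }
            Command::Draw { vertices } => encoder.draw(*vertices),
        }
    }
}

fn main() {
    let stream = [
        Command::SetPipeline(1),
        Command::SetBindGroup { index: 0, group: 7 },
        Command::Draw { vertices: 3 },
    ];
    execute(&mut Encoder, &stream);
}
```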
base: BasePass, pub(super) is_depth_read_only: bool, pub(super) is_stencil_read_only: bool, - pub(crate) device_id: Stored, + pub(crate) device: Arc>, pub(crate) used: RenderBundleScope, - pub(super) buffer_memory_init_actions: Vec, - pub(super) texture_memory_init_actions: Vec, + pub(super) buffer_memory_init_actions: Vec>, + pub(super) texture_memory_init_actions: Vec>, pub(super) context: RenderPassContext, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, discard_hal_labels: bool, } @@ -779,19 +775,10 @@ impl RenderBundle { /// Note that the function isn't expected to fail, generally. /// All the validation has already been done by this point. /// The only failure condition is if some of the used buffers are destroyed. - pub(super) unsafe fn execute( - &self, - raw: &mut A::CommandEncoder, - pipeline_layout_guard: &Storage< - crate::binding_model::PipelineLayout, - id::PipelineLayoutId, - >, - bind_group_guard: &Storage, id::BindGroupId>, - pipeline_guard: &Storage, id::RenderPipelineId>, - buffer_guard: &Storage, id::BufferId>, - ) -> Result<(), ExecutionError> { + pub(super) unsafe fn execute(&self, raw: &mut A::CommandEncoder) -> Result<(), ExecutionError> { + let trackers = &self.used; let mut offsets = self.base.dynamic_offsets.as_slice(); - let mut pipeline_layout_id = None::>; + let mut pipeline_layout = None::>>; if !self.discard_hal_labels { if let Some(ref label) = self.base.label { unsafe { raw.begin_debug_marker(label) }; @@ -805,22 +792,24 @@ impl RenderBundle { num_dynamic_offsets, bind_group_id, } => { - let bind_group = bind_group_guard.get(bind_group_id).unwrap(); + let bind_groups = trackers.bind_groups.read(); + let bind_group = bind_groups.get(bind_group_id).unwrap(); unsafe { raw.set_bind_group( - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, + pipeline_layout.as_ref().unwrap().raw(), index, - &bind_group.raw, + bind_group.raw(), &offsets[..num_dynamic_offsets as usize], ) }; offsets = &offsets[num_dynamic_offsets as usize..]; } RenderCommand::SetPipeline(pipeline_id) => { - let pipeline = pipeline_guard.get(pipeline_id).unwrap(); - unsafe { raw.set_render_pipeline(&pipeline.raw) }; + let render_pipelines = trackers.render_pipelines.read(); + let pipeline = render_pipelines.get(pipeline_id).unwrap(); + unsafe { raw.set_render_pipeline(pipeline.raw()) }; - pipeline_layout_id = Some(pipeline.layout_id.value); + pipeline_layout = Some(pipeline.layout.clone()); } RenderCommand::SetIndexBuffer { buffer_id, @@ -828,12 +817,8 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -847,12 +832,8 @@ impl RenderBundle { offset, size, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); let bb = hal::BufferBinding { buffer, offset, @@ -866,8 +847,7 @@ impl RenderBundle { size_bytes, values_offset, } => { - let pipeline_layout_id = pipeline_layout_id.unwrap(); - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; + let pipeline_layout = pipeline_layout.as_ref().unwrap(); if let Some(values_offset) = values_offset { let values_end_offset = @@ -876,7 +856,12 @@ impl RenderBundle { [(values_offset 
as usize)..values_end_offset]; unsafe { - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + raw.set_push_constants( + pipeline_layout.raw(), + stages, + offset, + data_slice, + ) } } else { super::push_constant_clear( @@ -885,7 +870,7 @@ impl RenderBundle { |clear_offset, clear_data| { unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw(), stages, clear_offset, clear_data, @@ -926,12 +911,8 @@ impl RenderBundle { count: None, indexed: false, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { @@ -940,12 +921,8 @@ impl RenderBundle { count: None, indexed: true, } => { - let buffer = buffer_guard - .get(buffer_id) - .unwrap() - .raw - .as_ref() - .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?; + let buffers = trackers.buffers.read(); + let buffer = buffers.get(buffer_id).unwrap().raw(); unsafe { raw.draw_indexed_indirect(buffer, offset, 1) }; } RenderCommand::MultiDrawIndirect { .. } @@ -982,11 +959,15 @@ impl RenderBundle { } } -impl<A: HalApi> Resource for RenderBundle<A> { - const TYPE: &'static str = "RenderBundle"; +impl<A: HalApi> Resource<RenderBundleId> for RenderBundle<A> { + const TYPE: ResourceType = "RenderBundle"; + + fn as_info(&self) -> &ResourceInfo<RenderBundleId> { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo<RenderBundleId> { + &mut self.info + } } @@ -1077,12 +1058,12 @@ impl VertexState { /// A bind group that has been set at a particular index during render bundle encoding. #[derive(Debug)] -struct BindState { +struct BindState<A: HalApi> { /// The id of the bind group set at this index. - bind_group_id: id::BindGroupId, + bind_group: Arc<BindGroup<A>>, /// The layout of `group`. - layout_id: id::Valid<id::BindGroupLayoutId>, + layout: Arc<BindGroupLayout<A>>, /// The range of dynamic offsets for this bind group, in the original /// command stream's `BasePass::dynamic_offsets` array. @@ -1106,12 +1087,9 @@ struct VertexLimitState { } /// The bundle's current pipeline, and some cached information needed for validation. -struct PipelineState { - /// The pipeline's id. - id: id::RenderPipelineId, - - /// The id of the pipeline's layout. - layout_id: id::Valid<id::PipelineLayoutId>, +struct PipelineState<A: HalApi> { + /// The pipeline + pipeline: Arc<RenderPipeline<A>>, /// How this pipeline's vertex shader traverses each vertex buffer, indexed /// by vertex buffer slot number. @@ -1125,18 +1103,18 @@ struct PipelineState { used_bind_groups: usize, } -impl PipelineState { - fn new<A: HalApi>( - pipeline_id: id::RenderPipelineId, - pipeline: &pipeline::RenderPipeline<A>, - layout: &binding_model::PipelineLayout<A>, - ) -> Self { +impl<A: HalApi> PipelineState<A> { + fn new(pipeline: &Arc<RenderPipeline<A>>) -> Self { Self { - id: pipeline_id, - layout_id: pipeline.layout_id.value, + pipeline: pipeline.clone(), steps: pipeline.vertex_steps.to_vec(), - push_constant_ranges: layout.push_constant_ranges.iter().cloned().collect(), - used_bind_groups: layout.bind_group_layout_ids.len(), + push_constant_ranges: pipeline + .layout + .push_constant_ranges + .iter() + .cloned() + .collect(), + used_bind_groups: pipeline.layout.bind_group_layouts.len(), } } @@ -1178,10 +1156,10 @@ struct State { trackers: RenderBundleScope<A>, /// The currently set pipeline, if any. - pipeline: Option<PipelineState>, + pipeline: Option<PipelineState<A>>, /// The bind group set at each index, if any.
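// Editor's sketch: `PipelineState::new` above now derives everything needed for later
// validation (push-constant ranges, bind group count) from the Arc'd pipeline itself
// instead of chasing a layout id through storage. A minimal sketch of the same idea,
// with hypothetical simplified types, not the wgpu-core ones:
use std::{ops::Range, sync::Arc};

struct BindGroupLayout;
struct PipelineLayout {
    push_constant_ranges: Vec<Range<u32>>,
    bind_group_layouts: Vec<Arc<BindGroupLayout>>,
}
struct RenderPipeline { layout: Arc<PipelineLayout> }

struct PipelineState {
    pipeline: Arc<RenderPipeline>,         // keeps the pipeline (and its layout) alive
    push_constant_ranges: Vec<Range<u32>>, // cached copy for per-command validation
    used_bind_groups: usize,
}

impl PipelineState {
    fn new(pipeline: &Arc<RenderPipeline>) -> Self {
        Self {
            pipeline: pipeline.clone(),
            push_constant_ranges: pipeline.layout.push_constant_ranges.clone(),
            used_bind_groups: pipeline.layout.bind_group_layouts.len(),
        }
    }
}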
- bind: ArrayVec, { hal::MAX_BIND_GROUPS }>, + bind: ArrayVec>, { hal::MAX_BIND_GROUPS }>, /// The state of each vertex buffer slot. vertex: ArrayVec, { hal::MAX_VERTEX_BUFFERS }>, @@ -1200,7 +1178,7 @@ struct State { } impl State { - fn vertex_limits(&self, pipeline: &PipelineState) -> VertexLimitState { + fn vertex_limits(&self, pipeline: &PipelineState) -> VertexLimitState { let mut vert_state = VertexLimitState { vertex_limit: u32::MAX, vertex_limit_slot: 0, @@ -1231,11 +1209,11 @@ impl State { /// Return the id of the current pipeline, if any. fn pipeline_id(&self) -> Option { - self.pipeline.as_ref().map(|p| p.id) + self.pipeline.as_ref().map(|p| p.pipeline.as_info().id()) } /// Return the current pipeline state. Return an error if none is set. - fn pipeline(&self, scope: PassErrorScope) -> Result<&PipelineState, RenderBundleError> { + fn pipeline(&self, scope: PassErrorScope) -> Result<&PipelineState, RenderBundleError> { self.pipeline .as_ref() .ok_or(DrawError::MissingPipeline) @@ -1252,8 +1230,8 @@ impl State { fn set_bind_group( &mut self, slot: u32, - bind_group_id: id::BindGroupId, - layout_id: id::Valid, + bind_group: &Arc>, + layout: &Arc>, dynamic_offsets: Range, ) { // If this call wouldn't actually change this index's state, we can @@ -1261,7 +1239,7 @@ impl State { // be different.) if dynamic_offsets.is_empty() { if let Some(ref contents) = self.bind[slot as usize] { - if contents.bind_group_id == bind_group_id { + if contents.bind_group.is_equal(bind_group) { return; } } @@ -1269,8 +1247,8 @@ impl State { // Record the index's new state. self.bind[slot as usize] = Some(BindState { - bind_group_id, - layout_id, + bind_group: bind_group.clone(), + layout: layout.clone(), dynamic_offsets, is_dirty: true, }); @@ -1293,18 +1271,14 @@ impl State { /// /// - Changing the push constant ranges at all requires re-establishing /// all bind groups. - fn invalidate_bind_groups( - &mut self, - new: &PipelineState, - layout: &binding_model::PipelineLayout, - ) { + fn invalidate_bind_groups(&mut self, new: &PipelineState, layout: &PipelineLayout) { match self.pipeline { None => { // Establishing entirely new pipeline state. self.invalidate_bind_group_from(0); } Some(ref old) => { - if old.id == new.id { + if old.pipeline.is_equal(&new.pipeline) { // Everything is derived from the pipeline, so if the id has // not changed, there's no need to consider anything else. 
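// Editor's sketch: the redundant-set elision in `set_bind_group` above compares the
// previously bound group against the new one by identity (`is_equal`) rather than by
// raw id. A sketch of that check, assuming identity is pointer equality on the shared
// handle (one possible implementation; the real trait method may compare tracked ids):
use std::{ops::Range, sync::Arc};

struct BindGroup;

struct BindState {
    bind_group: Arc<BindGroup>,
    dynamic_offsets: Range<usize>,
    is_dirty: bool,
}

fn set_bind_group(slots: &mut [Option<BindState>], slot: usize, new: &Arc<BindGroup>, offsets: Range<usize>) {
    // With no dynamic offsets, rebinding the same group is a no-op.
    if offsets.is_empty() {
        if let Some(prev) = &slots[slot] {
            if Arc::ptr_eq(&prev.bind_group, new) {
                return;
            }
        }
    }
    slots[slot] = Some(BindState {
        bind_group: new.clone(),
        dynamic_offsets: offsets,
        is_dirty: true,
    });
}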
return; @@ -1314,14 +1288,12 @@ impl State { if old.push_constant_ranges != new.push_constant_ranges { self.invalidate_bind_group_from(0); } else { - let first_changed = self - .bind - .iter() - .zip(&layout.bind_group_layout_ids) - .position(|(entry, &layout_id)| match *entry { - Some(ref contents) => contents.layout_id != layout_id, + let first_changed = self.bind.iter().zip(&layout.bind_group_layouts).position( + |(entry, layout)| match *entry { + Some(ref contents) => !contents.layout.is_equal(layout), None => false, - }); + }, + ); if let Some(slot) = first_changed { self.invalidate_bind_group_from(slot); } @@ -1395,7 +1367,7 @@ impl State { let offsets = &contents.dynamic_offsets; return Some(RenderCommand::SetBindGroup { index: i.try_into().unwrap(), - bind_group_id: contents.bind_group_id, + bind_group_id: contents.bind_group.as_info().id(), num_dynamic_offsets: (offsets.end - offsets.start) as u8, }); } diff --git a/third_party/rust/wgpu-core/src/command/clear.rs b/third_party/rust/wgpu-core/src/command/clear.rs index 214060a29530b..b702c25c67f3b 100644 --- a/third_party/rust/wgpu-core/src/command/clear.rs +++ b/third_party/rust/wgpu-core/src/command/clear.rs @@ -1,4 +1,4 @@ -use std::ops::Range; +use std::{ops::Range, sync::Arc}; #[cfg(feature = "trace")] use crate::device::trace::Command as TraceCommand; @@ -7,12 +7,10 @@ use crate::{ get_lowest_common_denom, global::Global, hal_api::HalApi, - hub::Token, - id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, DeviceId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange}, - resource::{Texture, TextureClearMode}, - storage, + resource::{Resource, Texture, TextureClearMode}, track::{TextureSelector, TextureTracker}, }; @@ -83,22 +81,28 @@ impl Global { log::trace!("CommandEncoder::clear_buffer {dst:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id) + + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; - let (buffer_guard, _) = hub.buffers.read(&mut token); + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let (dst_buffer, dst_pending) = cmd_buf - .trackers - .buffers - .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST) - .ok_or(ClearError::InvalidBuffer(dst))?; + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(dst) + .map_err(|_| ClearError::InvalidBuffer(dst))?; + cmd_buf_data + .trackers + .buffers + .set_single(dst_buffer, hal::BufferUses::COPY_DST) + .ok_or(ClearError::InvalidBuffer(dst))? + }; let dst_raw = dst_buffer .raw .as_ref() @@ -135,16 +139,16 @@ impl Global { } // Mark dest as initialized. 
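// Editor's sketch: `invalidate_bind_groups` above walks the bound slots and the new
// pipeline layout in lockstep and re-dirties everything from the first slot whose
// layout no longer matches. A self-contained sketch of that scan (illustrative types;
// pointer equality stands in for whatever identity test the layouts actually use):
use std::sync::Arc;

struct Layout;

fn first_incompatible(bound: &[Option<Arc<Layout>>], new_layouts: &[Arc<Layout>]) -> Option<usize> {
    bound
        .iter()
        .zip(new_layouts)
        .position(|(entry, layout)| match entry {
            Some(bound_layout) => !Arc::ptr_eq(bound_layout, layout),
            None => false, // an empty slot stays compatible until something binds it
        })
}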
- cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( - dst, + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, offset..end, MemoryInitKind::ImplicitlyInitialized, - )); + ), + ); // actual hal barrier & operation - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); - let cmd_buf_raw = cmd_buf.encoder.open(); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_buffers(dst_barrier.into_iter()); cmd_buf_raw.clear_buffer(dst_raw, offset..end); @@ -162,16 +166,14 @@ impl Global { log::trace!("CommandEncoder::clear_texture {dst:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.write(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id) + + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id) .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?; - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (texture_guard, _) = hub.textures.read(&mut token); + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearTexture { dst, subresource_range: *subresource_range, @@ -182,7 +184,8 @@ impl Global { return Err(ClearError::MissingClearTextureFeature); } - let dst_texture = texture_guard + let dst_texture = hub + .textures .get(dst) .map_err(|_| ClearError::InvalidTexture(dst))?; @@ -220,51 +223,54 @@ impl Global { }); } - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; if !device.is_valid() { - return Err(ClearError::InvalidDevice(cmd_buf.device_id.value.0)); + return Err(ClearError::InvalidDevice(cmd_buf.device.as_info().id())); } + let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(); clear_texture( - &*texture_guard, - Valid(dst), + &dst_texture, TextureInitRange { mip_range: subresource_mip_range, layer_range: subresource_layer_range, }, - cmd_buf.encoder.open(), - &mut cmd_buf.trackers.textures, + encoder, + &mut tracker.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) } } pub(crate) fn clear_texture( - storage: &storage::Storage, TextureId>, - dst_texture_id: Valid, + dst_texture: &Arc>, range: TextureInitRange, encoder: &mut A::CommandEncoder, texture_tracker: &mut TextureTracker, alignments: &hal::Alignments, zero_buffer: &A::Buffer, ) -> Result<(), ClearError> { - let dst_texture = &storage[dst_texture_id]; - - let dst_raw = dst_texture - .inner + let dst_inner = dst_texture.inner(); + let dst_raw = dst_inner + .as_ref() + .unwrap() .as_raw() - .ok_or(ClearError::InvalidTexture(dst_texture_id.0))?; + .ok_or_else(|| ClearError::InvalidTexture(dst_texture.as_info().id()))?; // Issue the right barrier. - let clear_usage = match dst_texture.clear_mode { + let clear_usage = match *dst_texture.clear_mode.read() { TextureClearMode::BufferCopy => hal::TextureUses::COPY_DST, TextureClearMode::RenderPass { is_color: false, .. } => hal::TextureUses::DEPTH_STENCIL_WRITE, - TextureClearMode::RenderPass { is_color: true, .. 
} => hal::TextureUses::COLOR_TARGET, + TextureClearMode::Surface { .. } | TextureClearMode::RenderPass { is_color: true, .. } => { + hal::TextureUses::COLOR_TARGET + } TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0)); + return Err(ClearError::NoValidTextureClearMode( + dst_texture.as_info().id(), + )); } }; @@ -287,15 +293,15 @@ pub(crate) fn clear_texture( // clear_texture api in order to remove this check and call the cheaper // change_replace_tracked whenever possible. let dst_barrier = texture_tracker - .set_single(dst_texture, dst_texture_id.0, selector, clear_usage) + .set_single(dst_texture, selector, clear_usage) .unwrap() - .map(|pending| pending.into_hal(dst_texture)); + .map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())); unsafe { encoder.transition_textures(dst_barrier.into_iter()); } // Record actual clearing - match dst_texture.clear_mode { + match *dst_texture.clear_mode.read() { TextureClearMode::BufferCopy => clear_texture_via_buffer_copies::( &dst_texture.desc, alignments, @@ -304,17 +310,22 @@ pub(crate) fn clear_texture( encoder, dst_raw, ), + TextureClearMode::Surface { .. } => { + clear_texture_via_render_passes(dst_texture, range, true, encoder)? + } TextureClearMode::RenderPass { is_color, .. } => { clear_texture_via_render_passes(dst_texture, range, is_color, encoder)? } TextureClearMode::None => { - return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0)); + return Err(ClearError::NoValidTextureClearMode( + dst_texture.as_info().id(), + )); } } Ok(()) } -fn clear_texture_via_buffer_copies( +fn clear_texture_via_buffer_copies( texture_desc: &wgt::TextureDescriptor<(), Vec>, alignments: &hal::Alignments, zero_buffer: &A::Buffer, // Buffer of size device::ZERO_BUFFER_SIZE @@ -406,7 +417,7 @@ fn clear_texture_via_buffer_copies( } } -fn clear_texture_via_render_passes( +fn clear_texture_via_render_passes( dst_texture: &Texture, range: TextureInitRange, is_color: bool, @@ -419,6 +430,7 @@ fn clear_texture_via_render_passes( height: dst_texture.desc.size.height, depth_or_array_layers: 1, // Only one layer is cleared at a time. 
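// Editor's sketch: clearing above picks a strategy from the texture's clear mode, a
// copy from the device's zero buffer, or a render pass that writes the clear value
// (with `Surface` treated like a color render target). A minimal sketch of that
// dispatch, with hypothetical simplified types:
enum ClearMode {
    BufferCopy,
    RenderPass { is_color: bool },
    Surface,
    None,
}

#[derive(Debug, PartialEq)]
enum Strategy { CopyFromZeroBuffer, ColorPass, DepthStencilPass }

fn pick_strategy(mode: &ClearMode) -> Result<Strategy, &'static str> {
    match mode {
        ClearMode::BufferCopy => Ok(Strategy::CopyFromZeroBuffer),
        ClearMode::Surface | ClearMode::RenderPass { is_color: true } => Ok(Strategy::ColorPass),
        ClearMode::RenderPass { is_color: false } => Ok(Strategy::DepthStencilPass),
        ClearMode::None => Err("texture has no valid clear mode"),
    }
}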
}; + let clear_mode = &dst_texture.clear_mode.read(); for mip_level in range.mip_range { let extent = extent_base.mip_level_size(mip_level, dst_texture.desc.dimension); @@ -427,7 +439,12 @@ fn clear_texture_via_render_passes( let (color_attachments, depth_stencil_attachment) = if is_color { color_attachments_tmp = [Some(hal::ColorAttachment { target: hal::Attachment { - view: dst_texture.get_clear_view(mip_level, depth_or_layer), + view: Texture::get_clear_view( + clear_mode, + &dst_texture.desc, + mip_level, + depth_or_layer, + ), usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: None, @@ -440,7 +457,12 @@ fn clear_texture_via_render_passes( &[][..], Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: dst_texture.get_clear_view(mip_level, depth_or_layer), + view: Texture::get_clear_view( + clear_mode, + &dst_texture.desc, + mip_level, + depth_or_layer, + ), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops: hal::AttachmentOps::STORE, diff --git a/third_party/rust/wgpu-core/src/command/compute.rs b/third_party/rust/wgpu-core/src/command/compute.rs index 4918afdc4993f..c457a33186731 100644 --- a/third_party/rust/wgpu-core/src/command/compute.rs +++ b/third_party/rust/wgpu-core/src/command/compute.rs @@ -1,7 +1,7 @@ +use crate::resource::Resource; use crate::{ binding_model::{ - BindError, BindGroup, BindGroupLayouts, LateMinBufferBindingSizeMismatch, - PushConstantUploadError, + BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError, }, command::{ bind::Binder, @@ -14,14 +14,12 @@ use crate::{ error::{ErrorFormatter, PrettyError}, global::Global, hal_api::HalApi, - hal_label, - hub::Token, - id, + hal_label, id, id::DeviceId, identity::GlobalIdentityHandlerFactory, init_tracker::MemoryInitKind, pipeline, - resource::{self, Buffer, Texture}, + resource::{self}, storage::Storage, track::{Tracker, UsageConflict, UsageScope}, validation::{check_buffer_usage, MissingBufferUsageError}, @@ -281,15 +279,15 @@ where } struct State { - binder: Binder, + binder: Binder, pipeline: Option, scope: UsageScope, debug_scope_depth: u32, } impl State { - fn is_ready(&self, bind_group_layouts: &BindGroupLayouts) -> Result<(), DispatchError> { - let bind_mask = self.binder.invalid_mask(bind_group_layouts); + fn is_ready(&self) -> Result<(), DispatchError> { + let bind_mask = self.binder.invalid_mask(); if bind_mask != 0 { //let (expected, provided) = self.binder.entries[index as usize].info(); return Err(DispatchError::IncompatibleBindGroup { @@ -311,15 +309,10 @@ impl State { raw_encoder: &mut A::CommandEncoder, base_trackers: &mut Tracker, bind_group_guard: &Storage, id::BindGroupId>, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, - indirect_buffer: Option>, + indirect_buffer: Option, ) -> Result<(), UsageConflict> { for id in self.binder.list_active() { - unsafe { - self.scope - .merge_bind_group(texture_guard, &bind_group_guard[id].used)? - }; + unsafe { self.scope.merge_bind_group(&bind_group_guard[id].used)? }; // Note: stateless trackers are not merged: the lifetime reference // is held to the bind group itself. 
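// Editor's sketch: merging each active bind group's usage scope, as above, is how a
// pass accumulates the hal transitions it must emit before it runs. A toy scope keyed
// by a plain resource id (the real trackers are per-resource-type, lock-guarded, and
// use hal usage bitflags; the write mask below is a made-up placeholder):
use std::collections::{hash_map::Entry, HashMap};

type Uses = u32; // stand-in for hal::BufferUses / hal::TextureUses bitflags

fn is_write(uses: Uses) -> bool {
    uses & 0xFF00 != 0 // hypothetical write mask, not the real hal encoding
}

#[derive(Default)]
struct UsageScope {
    states: HashMap<u64, Uses>, // resource id -> accumulated uses
}

impl UsageScope {
    // Fold another scope in; one resource used two incompatible ways is a conflict.
    fn merge(&mut self, other: &UsageScope) -> Result<(), u64> {
        for (&id, &new_uses) in &other.states {
            match self.states.entry(id) {
                Entry::Vacant(e) => {
                    e.insert(new_uses);
                }
                Entry::Occupied(mut e) => {
                    let old = *e.get();
                    if (is_write(old) || is_write(new_uses)) && old != new_uses {
                        return Err(id); // usage conflict
                    }
                    e.insert(old | new_uses);
                }
            }
        }
        Ok(())
    }
}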
} @@ -327,7 +320,6 @@ impl State { for id in self.binder.list_active() { unsafe { base_trackers.set_and_remove_from_usage_scope_sparse( - texture_guard, &mut self.scope, &bind_group_guard[id].used, ) @@ -343,7 +335,7 @@ impl State { log::trace!("Encoding dispatch barriers"); - CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard); + CommandBuffer::drain_barriers(raw_encoder, base_trackers); Ok(()) } } @@ -374,49 +366,46 @@ impl Global { let init_scope = PassErrorScope::Pass(encoder_id); let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - // Spell out the type, to placate rust-analyzer. - // https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &mut CommandBuffer = - CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id) - .map_pass_err(init_scope)?; - - // We automatically keep extending command buffers over time, and because - // we want to insert a command buffer _before_ what we're about to record, - // we need to make sure to close the previous one. - cmd_buf.encoder.close(); - // We will reset this to `Recording` if we succeed, acts as a fail-safe. - cmd_buf.status = CommandEncoderStatus::Error; - let raw = cmd_buf.encoder.open(); - let device = &device_guard[cmd_buf.device_id.value]; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; + let device = &cmd_buf.device; if !device.is_valid() { return Err(ComputePassErrorInner::InvalidDevice( - cmd_buf.device_id.value.0, + cmd_buf.device.as_info().id(), )) .map_pass_err(init_scope); } + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunComputePass { base: BasePass::from_ref(base), timestamp_writes: timestamp_writes.cloned(), }); } - let (_, mut token) = hub.render_bundles.read(&mut token); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let encoder = &mut cmd_buf_data.encoder; + let status = &mut cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + + // We automatically keep extending command buffers over time, and because + // we want to insert a command buffer _before_ what we're about to record, + // we need to make sure to close the previous one. 
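// Editor's sketch: the close/open dance above exists so a pass can later inject a
// barrier-only command buffer *before* the commands it just recorded: close the
// current raw encoder, record the pass, then open a fresh encoder for fixups and swap
// it in front. A sketch of the list manipulation involved (assumed simplified; the
// real CommandEncoder tracks an `is_open` flag and hal encoders, not strings):
struct Encoder {
    list: Vec<String>,       // finished command buffers, in submission order
    current: Option<String>, // the live, still-recording buffer
}

impl Encoder {
    fn close(&mut self) {
        if let Some(buf) = self.current.take() {
            self.list.push(buf);
        }
    }
    fn open(&mut self) -> &mut String {
        self.current.get_or_insert_with(String::new)
    }
    // Close the live buffer and move it *before* the previously closed one, so
    // e.g. transitions recorded after a pass still execute ahead of it.
    fn close_and_swap(&mut self) {
        self.close();
        let n = self.list.len();
        if n >= 2 {
            self.list.swap(n - 1, n - 2);
        }
    }
}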
+ encoder.close(); + // will be reset to true if recording is done without errors + *status = CommandEncoderStatus::Error; + let raw = encoder.open(); + + let bind_group_guard = hub.bind_groups.read(); + let pipeline_guard = hub.compute_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); let mut state = State { binder: Binder::new(), @@ -430,8 +419,7 @@ impl Global { let mut active_query = None; let timestamp_writes = if let Some(tw) = timestamp_writes { - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, tw.query_set) .ok_or(ComputePassErrorInner::InvalidQuerySet(tw.query_set)) @@ -452,12 +440,12 @@ impl Global { // But no point in erroring over that nuance here! if let Some(range) = range { unsafe { - raw.reset_queries(&query_set.raw, range); + raw.reset_queries(query_set.raw.as_ref().unwrap(), range); } } Some(hal::ComputePassTimestampWrites { - query_set: &query_set.raw, + query_set: query_set.raw.as_ref().unwrap(), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }) @@ -465,7 +453,7 @@ impl Global { None }; - cmd_buf.trackers.set_size( + tracker.set_size( Some(&*buffer_guard), Some(&*texture_guard), None, @@ -521,8 +509,7 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &BindGroup = cmd_buf - .trackers + let bind_group = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(ComputePassErrorInner::InvalidBindGroup(bind_group_id)) @@ -531,42 +518,43 @@ impl Global { .validate_dynamic_bindings(index, &temp_offsets, &cmd_buf.limits) .map_pass_err(scope)?; - cmd_buf.buffer_memory_init_actions.extend( - bind_group.used_buffer_ranges.iter().filter_map( - |action| match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), - Err(_) => None, - }, - ), + buffer_memory_init_actions.extend( + bind_group + .used_buffer_ranges + .read() + .iter() + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) + }), ); - for action in bind_group.used_texture_ranges.iter() { - pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), - ); + for action in bind_group.used_texture_ranges.read().iter() { + pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } - let pipeline_layout_id = state.binder.pipeline_layout_id; - let entries = state.binder.assign_group( - index as usize, - id::Valid(bind_group_id), - bind_group, - &temp_offsets, - ); - if !entries.is_empty() { - let pipeline_layout = - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw; + let pipeline_layout = state.binder.pipeline_layout.clone(); + let entries = + state + .binder + .assign_group(index as usize, bind_group, &temp_offsets); + if !entries.is_empty() && pipeline_layout.is_some() { + let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - unsafe { - raw.set_bind_group( - pipeline_layout, - index + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline_layout, + index + i as u32, + raw_bg, + 
&e.dynamic_offsets, + ); + } } } } @@ -576,44 +564,48 @@ impl Global { state.pipeline = Some(pipeline_id); - let pipeline: &pipeline::ComputePipeline = cmd_buf - .trackers + let pipeline: &pipeline::ComputePipeline = tracker .compute_pipelines .add_single(&*pipeline_guard, pipeline_id) .ok_or(ComputePassErrorInner::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; unsafe { - raw.set_compute_pipeline(&pipeline.raw); + raw.set_compute_pipeline(pipeline.raw()); } // Rebind resources - if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; - + if state.binder.pipeline_layout.is_none() + || !state + .binder + .pipeline_layout + .as_ref() + .unwrap() + .is_equal(&pipeline.layout) + { let (start_index, entries) = state.binder.change_pipeline_layout( - &*pipeline_layout_guard, - pipeline.layout_id.value, + &pipeline.layout, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - unsafe { - raw.set_bind_group( - &pipeline_layout.raw, - start_index as u32 + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline.layout.raw(), + start_index as u32 + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } // Clear push constant ranges let non_overlapping = super::bind::compute_nonoverlapping_ranges( - &pipeline_layout.push_constant_ranges, + &pipeline.layout.push_constant_ranges, ); for range in non_overlapping { let offset = range.range.start; @@ -623,7 +615,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline.layout.raw(), wgt::ShaderStages::COMPUTE, clear_offset, clear_data, @@ -646,15 +638,15 @@ impl Global { let data_slice = &base.push_constant_data[(values_offset as usize)..values_end_offset]; - let pipeline_layout_id = state + let pipeline_layout = state .binder - .pipeline_layout_id + .pipeline_layout + .as_ref() //TODO: don't error here, lazily update the push constants .ok_or(ComputePassErrorInner::Dispatch( DispatchError::MissingPipeline, )) .map_pass_err(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; pipeline_layout .validate_push_constant_ranges( @@ -666,7 +658,7 @@ impl Global { unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline_layout.raw(), wgt::ShaderStages::COMPUTE, offset, data_slice, @@ -678,19 +670,10 @@ impl Global { indirect: false, pipeline: state.pipeline, }; + state.is_ready().map_pass_err(scope)?; state - .is_ready(&*bind_group_layout_guard) - .map_pass_err(scope)?; - state - .flush_states( - raw, - &mut intermediate_trackers, - &*bind_group_guard, - &*buffer_guard, - &*texture_guard, - None, - ) + .flush_states(raw, &mut intermediate_trackers, &*bind_group_guard, None) .map_pass_err(scope)?; let groups_size_limit = cmd_buf.limits.max_compute_workgroups_per_dimension; @@ -718,15 +701,13 @@ impl Global { pipeline: state.pipeline, }; - state - .is_ready(&*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready().map_pass_err(scope)?; device .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = state + let indirect_buffer = state .scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -752,9 +733,9 @@ impl Global { let stride = 3 
* 4; // 3 integers, x/y/z group size - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( + indirect_buffer, offset..(offset + stride), MemoryInitKind::NeedsInitializedMemory, ), @@ -765,9 +746,7 @@ impl Global { raw, &mut intermediate_trackers, &*bind_group_guard, - &*buffer_guard, - &*texture_guard, - Some(id::Valid(buffer_id)), + Some(buffer_id), ) .map_pass_err(scope)?; unsafe { @@ -819,8 +798,7 @@ impl Global { .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES) .map_pass_err(scope)?; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id)) @@ -836,8 +814,7 @@ impl Global { } => { let scope = PassErrorScope::BeginPipelineStatisticsQuery; - let query_set: &resource::QuerySet = cmd_buf - .trackers + let query_set: &resource::QuerySet = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id)) @@ -865,33 +842,27 @@ impl Global { unsafe { raw.end_compute_pass(); } + // We've successfully recorded the compute pass, bring the // command buffer out of the error state. - cmd_buf.status = CommandEncoderStatus::Recording; + *status = CommandEncoderStatus::Recording; // Stop the current command buffer. - cmd_buf.encoder.close(); + encoder.close(); // Create a new command buffer, which we will insert _before_ the body of the compute pass. // // Use that buffer to insert barriers and clear discarded images. - let transit = cmd_buf.encoder.open(); + let transit = encoder.open(); fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, - &texture_guard, - &mut cmd_buf.trackers.textures, + &mut tracker.textures, device, ); - CommandBuffer::insert_barriers_from_tracker( - transit, - &mut cmd_buf.trackers, - &intermediate_trackers, - &*buffer_guard, - &*texture_guard, - ); + CommandBuffer::insert_barriers_from_tracker(transit, tracker, &intermediate_trackers); // Close the command buffer, and swap it with the previous. - cmd_buf.encoder.close_and_swap(); + encoder.close_and_swap(); Ok(()) } diff --git a/third_party/rust/wgpu-core/src/command/memory_init.rs b/third_party/rust/wgpu-core/src/command/memory_init.rs index e8d5f73139c94..f10c85e2be273 100644 --- a/third_party/rust/wgpu-core/src/command/memory_init.rs +++ b/third_party/rust/wgpu-core/src/command/memory_init.rs @@ -1,14 +1,12 @@ -use std::{collections::hash_map::Entry, ops::Range, vec::Drain}; +use std::{collections::hash_map::Entry, ops::Range, sync::Arc, vec::Drain}; use hal::CommandEncoder; use crate::{ device::Device, hal_api::HalApi, - id::{self, TextureId}, init_tracker::*, - resource::{Buffer, Texture}, - storage::Storage, + resource::{Resource, Texture}, track::{TextureTracker, Tracker}, FastHashMap, }; @@ -18,31 +16,39 @@ use super::{clear::clear_texture, BakedCommands, DestroyedBufferError, Destroyed /// Surface that was discarded by `StoreOp::Discard` of a preceding renderpass. /// Any read access to this surface needs to be preceded by a texture initialization. 
#[derive(Clone)] -pub(crate) struct TextureSurfaceDiscard { - pub texture: TextureId, +pub(crate) struct TextureSurfaceDiscard { + pub texture: Arc>, pub mip_level: u32, pub layer: u32, } -pub(crate) type SurfacesInDiscardState = Vec; +pub(crate) type SurfacesInDiscardState = Vec>; -#[derive(Default)] -pub(crate) struct CommandBufferTextureMemoryActions { +pub(crate) struct CommandBufferTextureMemoryActions { /// The tracker actions that we need to be executed before the command /// buffer is executed. - init_actions: Vec, + init_actions: Vec>, /// All the discards that haven't been followed by init again within the /// command buffer i.e. everything in this list resets the texture init /// state *after* the command buffer execution - discards: Vec, + discards: Vec>, } -impl CommandBufferTextureMemoryActions { - pub(crate) fn drain_init_actions(&mut self) -> Drain { +impl Default for CommandBufferTextureMemoryActions { + fn default() -> Self { + Self { + init_actions: Default::default(), + discards: Default::default(), + } + } +} + +impl CommandBufferTextureMemoryActions { + pub(crate) fn drain_init_actions(&mut self) -> Drain> { self.init_actions.drain(..) } - pub(crate) fn discard(&mut self, discard: TextureSurfaceDiscard) { + pub(crate) fn discard(&mut self, discard: TextureSurfaceDiscard) { self.discards.push(discard); } @@ -50,11 +56,10 @@ impl CommandBufferTextureMemoryActions { // Returns previously discarded surface that need to be initialized *immediately* now. // Only returns a non-empty list if action is MemoryInitKind::NeedsInitializedMemory. #[must_use] - pub(crate) fn register_init_action( + pub(crate) fn register_init_action( &mut self, - action: &TextureInitTrackerAction, - texture_guard: &Storage, TextureId>, - ) -> SurfacesInDiscardState { + action: &TextureInitTrackerAction, + ) -> SurfacesInDiscardState { let mut immediately_necessary_clears = SurfacesInDiscardState::new(); // Note that within a command buffer we may stack arbitrary memory init @@ -64,18 +69,20 @@ impl CommandBufferTextureMemoryActions { // We don't need to add MemoryInitKind::NeedsInitializedMemory to // init_actions if a surface is part of the discard list. But that would // mean splitting up the action which is more than we'd win here. - self.init_actions - .extend(match texture_guard.get(action.id) { - Ok(texture) => texture.initialization_status.check_action(action), - Err(_) => return immediately_necessary_clears, // texture no longer exists - }); + self.init_actions.extend( + action + .texture + .initialization_status + .read() + .check_action(action), + ); // We expect very few discarded surfaces at any point in time which is // why a simple linear search is likely best. (i.e. most of the time // self.discards is empty!) 
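// Editor's sketch: the retain-loop that follows implements exactly that linear scan:
// walk the (usually empty) discard list, and when a newly registered init action
// touches a discarded surface, drop it from the list and, for reads, queue it for an
// immediate clear. A condensed sketch with illustrative types (the real list holds
// Arc'd textures, and the non-read case re-registers an implicit init instead):
use std::ops::Range;

#[derive(Clone)]
struct Discard {
    texture: u64, // stand-in for the Arc'd texture handle
    mip: u32,
    layer: u32,
}

fn resolve_discards(
    discards: &mut Vec<Discard>,
    texture: u64,
    mips: Range<u32>,
    layers: Range<u32>,
    needs_init_now: bool,
    clear_now: &mut Vec<Discard>,
) {
    discards.retain(|d| {
        let hit = d.texture == texture && mips.contains(&d.mip) && layers.contains(&d.layer);
        if hit && needs_init_now {
            clear_now.push(d.clone()); // must be cleared before the pending read
        }
        !hit // a surface this action (re)initializes is no longer discarded
    });
}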
let init_actions = &mut self.init_actions; self.discards.retain(|discarded_surface| { - if discarded_surface.texture == action.id + if discarded_surface.texture.as_info().id() == action.texture.as_info().id() && action.range.layer_range.contains(&discarded_surface.layer) && action .range @@ -89,7 +96,7 @@ impl CommandBufferTextureMemoryActions { // because it might have been uninitialized prior to // discarding init_actions.push(TextureInitTrackerAction { - id: discarded_surface.texture, + texture: discarded_surface.texture.clone(), range: TextureInitRange { mip_range: discarded_surface.mip_level ..(discarded_surface.mip_level + 1), @@ -109,20 +116,16 @@ impl CommandBufferTextureMemoryActions { // Shortcut for register_init_action when it is known that the action is an // implicit init, not requiring any immediate resource init. - pub(crate) fn register_implicit_init( + pub(crate) fn register_implicit_init( &mut self, - id: id::Valid, + texture: &Arc>, range: TextureInitRange, - texture_guard: &Storage, TextureId>, ) { - let must_be_empty = self.register_init_action( - &TextureInitTrackerAction { - id: id.0, - range, - kind: MemoryInitKind::ImplicitlyInitialized, - }, - texture_guard, - ); + let must_be_empty = self.register_init_action(&TextureInitTrackerAction { + texture: texture.clone(), + range, + kind: MemoryInitKind::ImplicitlyInitialized, + }); assert!(must_be_empty.is_empty()); } } @@ -133,18 +136,16 @@ impl CommandBufferTextureMemoryActions { // Takes care of barriers as well! pub(crate) fn fixup_discarded_surfaces< A: HalApi, - InitIter: Iterator, + InitIter: Iterator>, >( inits: InitIter, encoder: &mut A::CommandEncoder, - texture_guard: &Storage, TextureId>, texture_tracker: &mut TextureTracker, device: &Device, ) { for init in inits { clear_texture( - texture_guard, - id::Valid(init.texture), + &init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), @@ -152,7 +153,7 @@ pub(crate) fn fixup_discarded_surfaces< encoder, texture_tracker, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -164,16 +165,13 @@ impl BakedCommands { pub(crate) fn initialize_buffer_memory( &mut self, device_tracker: &mut Tracker, - buffer_guard: &mut Storage, id::BufferId>, ) -> Result<(), DestroyedBufferError> { // Gather init ranges for each buffer so we can collapse them. // It is not possible to do this at an earlier point since previously // executed command buffer change the resource init state. let mut uninitialized_ranges_per_buffer = FastHashMap::default(); for buffer_use in self.buffer_memory_init_actions.drain(..) 
{ - let buffer = buffer_guard - .get_mut(buffer_use.id) - .map_err(|_| DestroyedBufferError(buffer_use.id))?; + let mut initialization_status = buffer_use.buffer.initialization_status.write(); // align the end to 4 let end_remainder = buffer_use.range.end % wgt::COPY_BUFFER_ALIGNMENT; @@ -182,28 +180,27 @@ impl BakedCommands { } else { buffer_use.range.end + wgt::COPY_BUFFER_ALIGNMENT - end_remainder }; - let uninitialized_ranges = buffer - .initialization_status - .drain(buffer_use.range.start..end); + let uninitialized_ranges = initialization_status.drain(buffer_use.range.start..end); match buffer_use.kind { MemoryInitKind::ImplicitlyInitialized => {} MemoryInitKind::NeedsInitializedMemory => { - match uninitialized_ranges_per_buffer.entry(buffer_use.id) { + match uninitialized_ranges_per_buffer.entry(buffer_use.buffer.as_info().id()) { Entry::Vacant(e) => { - e.insert( + e.insert(( + buffer_use.buffer.clone(), uninitialized_ranges.collect::>>(), - ); + )); } Entry::Occupied(mut e) => { - e.get_mut().extend(uninitialized_ranges); + e.get_mut().1.extend(uninitialized_ranges); } } } } } - for (buffer_id, mut ranges) in uninitialized_ranges_per_buffer { + for (buffer_id, (buffer, mut ranges)) in uninitialized_ranges_per_buffer { // Collapse touching ranges. ranges.sort_by_key(|r| r.start); for i in (1..ranges.len()).rev() { @@ -222,19 +219,16 @@ impl BakedCommands { // must already know about it. let transition = device_tracker .buffers - .set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST) + .set_single(&buffer, hal::BufferUses::COPY_DST) .unwrap() .1; - let buffer = buffer_guard - .get_mut(buffer_id) - .map_err(|_| DestroyedBufferError(buffer_id))?; let raw_buf = buffer.raw.as_ref().ok_or(DestroyedBufferError(buffer_id))?; unsafe { self.encoder.transition_buffers( transition - .map(|pending| pending.into_hal(buffer)) + .map(|pending| pending.into_hal(&buffer)) .into_iter(), ); } @@ -270,18 +264,13 @@ impl BakedCommands { pub(crate) fn initialize_texture_memory( &mut self, device_tracker: &mut Tracker, - texture_guard: &mut Storage, TextureId>, device: &Device, ) -> Result<(), DestroyedTextureError> { let mut ranges: Vec = Vec::new(); for texture_use in self.texture_memory_actions.drain_init_actions() { - let texture = texture_guard - .get_mut(texture_use.id) - .map_err(|_| DestroyedTextureError(texture_use.id))?; - + let mut initialization_status = texture_use.texture.initialization_status.write(); let use_range = texture_use.range; - let affected_mip_trackers = texture - .initialization_status + let affected_mip_trackers = initialization_status .mips .iter_mut() .enumerate() @@ -309,13 +298,12 @@ impl BakedCommands { // TODO: Could we attempt some range collapsing here? for range in ranges.drain(..) { clear_texture( - texture_guard, - id::Valid(texture_use.id), + &texture_use.texture, range, &mut self.encoder, &mut device_tracker.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -325,11 +313,10 @@ impl BakedCommands { // cmdbuf start, we discard init states for textures it left discarded // after its execution. 
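// Editor's sketch: two small numeric tricks appear in `initialize_buffer_memory`
// above. The end of each range is rounded up to COPY_BUFFER_ALIGNMENT (4 bytes), and
// each buffer's ranges are sorted and touching neighbours merged before clears are
// issued. A standalone sketch of both:
use std::ops::Range;

fn align_up_4(end: u64) -> u64 {
    const ALIGN: u64 = 4; // wgt::COPY_BUFFER_ALIGNMENT
    let rem = end % ALIGN;
    if rem == 0 { end } else { end + ALIGN - rem }
}

fn collapse(ranges: &mut Vec<Range<u64>>) {
    ranges.sort_by_key(|r| r.start);
    // Walk backwards so removals don't disturb unvisited indices.
    for i in (1..ranges.len()).rev() {
        if ranges[i].start <= ranges[i - 1].end {
            let removed = ranges.remove(i);
            ranges[i - 1].end = ranges[i - 1].end.max(removed.end);
        }
    }
}

// For example, [8..12, 0..3, 3..6] collapses to [0..6, 8..12],
// with each end already rounded up by align_up_4.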
for surface_discard in self.texture_memory_actions.discards.iter() { - let texture = texture_guard - .get_mut(surface_discard.texture) - .map_err(|_| DestroyedTextureError(surface_discard.texture))?; - texture + surface_discard + .texture .initialization_status + .write() .discard(surface_discard.mip_level, surface_discard.layer); } diff --git a/third_party/rust/wgpu-core/src/command/mod.rs b/third_party/rust/wgpu-core/src/command/mod.rs index 843803a0f6314..2c088b7a37eaa 100644 --- a/third_party/rust/wgpu-core/src/command/mod.rs +++ b/third_party/rust/wgpu-core/src/command/mod.rs @@ -9,6 +9,7 @@ mod render; mod transfer; use std::slice; +use std::sync::Arc; pub(crate) use self::clear::clear_texture; pub use self::{ @@ -17,21 +18,18 @@ pub use self::{ use self::memory_init::CommandBufferTextureMemoryActions; +use crate::device::Device; use crate::error::{ErrorFormatter, PrettyError}; +use crate::hub::Hub; +use crate::id::CommandBufferId; + use crate::init_tracker::BufferInitTrackerAction; +use crate::resource::{Resource, ResourceInfo, ResourceType}; use crate::track::{Tracker, UsageScope}; -use crate::{ - global::Global, - hal_api::HalApi, - hub::Token, - id, - identity::GlobalIdentityHandlerFactory, - resource::{Buffer, Texture}, - storage::Storage, - Label, Stored, -}; +use crate::{global::Global, hal_api::HalApi, id, identity::GlobalIdentityHandlerFactory, Label}; use hal::CommandEncoder as _; +use parking_lot::Mutex; use thiserror::Error; #[cfg(feature = "trace")] @@ -40,13 +38,13 @@ use crate::device::trace::Command as TraceCommand; const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64]; #[derive(Debug)] -enum CommandEncoderStatus { +pub(crate) enum CommandEncoderStatus { Recording, Finished, Error, } -struct CommandEncoder { +pub(crate) struct CommandEncoder { raw: A::CommandEncoder, list: Vec, is_open: bool, @@ -54,7 +52,7 @@ struct CommandEncoder { } //TODO: handle errors better -impl CommandEncoder { +impl CommandEncoder { /// Closes the live encoder fn close_and_swap(&mut self) { if self.is_open { @@ -98,58 +96,93 @@ pub struct BakedCommands { pub(crate) encoder: A::CommandEncoder, pub(crate) list: Vec, pub(crate) trackers: Tracker, - buffer_memory_init_actions: Vec, - texture_memory_actions: CommandBufferTextureMemoryActions, + buffer_memory_init_actions: Vec>, + texture_memory_actions: CommandBufferTextureMemoryActions, } pub(crate) struct DestroyedBufferError(pub id::BufferId); pub(crate) struct DestroyedTextureError(pub id::TextureId); -pub struct CommandBuffer { +pub struct CommandBufferMutable { encoder: CommandEncoder, status: CommandEncoderStatus, - pub(crate) device_id: Stored, pub(crate) trackers: Tracker, - buffer_memory_init_actions: Vec, - texture_memory_actions: CommandBufferTextureMemoryActions, + buffer_memory_init_actions: Vec>, + texture_memory_actions: CommandBufferTextureMemoryActions, pub(crate) pending_query_resets: QueryResetMap, - limits: wgt::Limits, - support_clear_texture: bool, #[cfg(feature = "trace")] pub(crate) commands: Option>, } +impl CommandBufferMutable { + pub(crate) fn open_encoder_and_tracker(&mut self) -> (&mut A::CommandEncoder, &mut Tracker) { + let encoder = self.encoder.open(); + let tracker = &mut self.trackers; + (encoder, tracker) + } +} + +pub struct CommandBuffer { + pub(crate) device: Arc>, + limits: wgt::Limits, + support_clear_texture: bool, + pub(crate) info: ResourceInfo, + pub(crate) data: Mutex>>, +} + +impl Drop for CommandBuffer { + fn drop(&mut self) { + if self.data.lock().is_none() { + return; + } + 
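// Editor's sketch: keeping all mutable recording state in
// `data: Mutex<Option<CommandBufferMutable>>`, as above, gives the command buffer two
// observable states: `Some` while recording, `None` once the payload has been taken
// for submission, so Drop only tears down the hal encoder when the buffer was never
// consumed. A sketch of that take-on-finish shape (simplified stand-in types):
use parking_lot::Mutex;

struct Baked { commands: Vec<u8> }

struct CmdBuf { data: Mutex<Option<Baked>> }

impl CmdBuf {
    fn extract(&self) -> Baked {
        // Take ownership; a later `drop` sees `None` and does nothing.
        self.data.lock().take().expect("command buffer already consumed")
    }
}

impl Drop for CmdBuf {
    fn drop(&mut self) {
        if let Some(baked) = self.data.lock().take() {
            // Un-submitted buffer: release its recording resources here.
            drop(baked);
        }
    }
}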
log::info!("Destroying CommandBuffer {:?}", self.info.label()); + let mut baked = self.extract_baked_commands(); + unsafe { + baked.encoder.reset_all(baked.list.into_iter()); + } + unsafe { + use hal::Device; + self.device.raw().destroy_command_encoder(baked.encoder); + } + } +} + impl CommandBuffer { pub(crate) fn new( encoder: A::CommandEncoder, - device_id: Stored, - limits: wgt::Limits, - _downlevel: wgt::DownlevelCapabilities, - features: wgt::Features, + device: &Arc>, #[cfg(feature = "trace")] enable_tracing: bool, label: Option, ) -> Self { CommandBuffer { - encoder: CommandEncoder { - raw: encoder, - is_open: false, - list: Vec::new(), - label, - }, - status: CommandEncoderStatus::Recording, - device_id, - trackers: Tracker::new(), - buffer_memory_init_actions: Default::default(), - texture_memory_actions: Default::default(), - pending_query_resets: QueryResetMap::new(), - limits, - support_clear_texture: features.contains(wgt::Features::CLEAR_TEXTURE), - #[cfg(feature = "trace")] - commands: if enable_tracing { - Some(Vec::new()) - } else { - None - }, + device: device.clone(), + limits: device.limits.clone(), + support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE), + info: ResourceInfo::new( + label + .as_ref() + .unwrap_or(&String::from("")) + .as_str(), + ), + data: Mutex::new(Some(CommandBufferMutable { + encoder: CommandEncoder { + raw: encoder, + is_open: false, + list: Vec::new(), + label, + }, + status: CommandEncoderStatus::Recording, + trackers: Tracker::new(), + buffer_memory_init_actions: Default::default(), + texture_memory_actions: Default::default(), + pending_query_resets: QueryResetMap::new(), + #[cfg(feature = "trace")] + commands: if enable_tracing { + Some(Vec::new()) + } else { + None + }, + })), } } @@ -157,50 +190,37 @@ impl CommandBuffer { raw: &mut A::CommandEncoder, base: &mut Tracker, head: &Tracker, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, ) { profiling::scope!("insert_barriers"); base.buffers.set_from_tracker(&head.buffers); - base.textures - .set_from_tracker(texture_guard, &head.textures); + base.textures.set_from_tracker(&head.textures); - Self::drain_barriers(raw, base, buffer_guard, texture_guard); + Self::drain_barriers(raw, base); } pub(crate) fn insert_barriers_from_scope( raw: &mut A::CommandEncoder, base: &mut Tracker, head: &UsageScope, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, ) { profiling::scope!("insert_barriers"); base.buffers.set_from_usage_scope(&head.buffers); - base.textures - .set_from_usage_scope(texture_guard, &head.textures); + base.textures.set_from_usage_scope(&head.textures); - Self::drain_barriers(raw, base, buffer_guard, texture_guard); + Self::drain_barriers(raw, base); } - pub(crate) fn drain_barriers( - raw: &mut A::CommandEncoder, - base: &mut Tracker, - buffer_guard: &Storage, id::BufferId>, - texture_guard: &Storage, id::TextureId>, - ) { + pub(crate) fn drain_barriers(raw: &mut A::CommandEncoder, base: &mut Tracker) { profiling::scope!("drain_barriers"); - let buffer_barriers = base.buffers.drain().map(|pending| { - let buf = unsafe { &buffer_guard.get_unchecked(pending.id) }; - pending.into_hal(buf) - }); - let texture_barriers = base.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + let buffer_barriers = base.buffers.drain_transitions(); + let (transitions, textures) = base.textures.drain_transitions(); + let texture_barriers = 
transitions .into_iter() .enumerate() .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); unsafe { raw.transition_buffers(buffer_barriers); @@ -210,13 +230,14 @@ } impl<A: HalApi> CommandBuffer<A> { - fn get_encoder_mut( - storage: &mut Storage<Self, id::CommandEncoderId>, + fn get_encoder<G: GlobalIdentityHandlerFactory>( + hub: &Hub<A, G>, id: id::CommandEncoderId, - ) -> Result<&mut Self, CommandEncoderError> { - match storage.get_mut(id) { - Ok(cmd_buf) => match cmd_buf.status { - CommandEncoderStatus::Recording => Ok(cmd_buf), + ) -> Result<Arc<Self>, CommandEncoderError> { + let storage = hub.command_buffers.read(); + match storage.get(id) { + Ok(cmd_buf) => match cmd_buf.data.lock().as_ref().unwrap().status { + CommandEncoderStatus::Recording => Ok(cmd_buf.clone()), CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), }, @@ -225,32 +246,53 @@ } pub fn is_finished(&self) -> bool { - match self.status { + match self.data.lock().as_ref().unwrap().status { CommandEncoderStatus::Finished => true, _ => false, } } - pub(crate) fn into_baked(self) -> BakedCommands<A> { + pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands<A> { + log::info!( + "Extracting BakedCommands from CommandBuffer {:?}", + self.info.label() + ); + let data = self.data.lock().take().unwrap(); BakedCommands { - encoder: self.encoder.raw, - list: self.encoder.list, - trackers: self.trackers, - buffer_memory_init_actions: self.buffer_memory_init_actions, - texture_memory_actions: self.texture_memory_actions, + encoder: data.encoder.raw, + list: data.encoder.list, + trackers: data.trackers, + buffer_memory_init_actions: data.buffer_memory_init_actions, + texture_memory_actions: data.texture_memory_actions, + } + } + + pub(crate) fn from_arc_into_baked(self: Arc<Self>) -> BakedCommands<A> { + if let Ok(mut command_buffer) = Arc::try_unwrap(self) { + command_buffer.extract_baked_commands() + } else { + panic!("CommandBuffer cannot be destroyed because it is still in use"); + } + } } -impl<A: HalApi> crate::resource::Resource for CommandBuffer<A> { - const TYPE: &'static str = "CommandBuffer"; +impl<A: HalApi> Resource<CommandBufferId> for CommandBuffer<A> { + const TYPE: ResourceType = "CommandBuffer"; - fn life_guard(&self) -> &crate::LifeGuard { - unreachable!() + fn as_info(&self) -> &ResourceInfo<CommandBufferId> { + &self.info } - fn label(&self) -> &str { - self.encoder.label.as_ref().map_or("", |s| s.as_str()) + fn as_info_mut(&mut self) -> &mut ResourceInfo<CommandBufferId> { + &mut self.info + } + + fn label(&self) -> String { + let str = match self.data.lock().as_ref().unwrap().encoder.label.as_ref() { + Some(label) => label.clone(), + _ => String::new(), + }; + str } } @@ -356,29 +398,31 @@ impl Global { &self, encoder_id: id::CommandEncoderId, _desc: &wgt::CommandBufferDescriptor, } -impl QueryResetMap { +impl QueryResetMap { pub fn new() -> Self { Self { map: FastHashMap::default(), @@ -69,7 +68,7 @@ impl QueryResetMap { // We've hit the end of a run, dispatch a reset (Some(start), false) => { run_start = None; - unsafe { raw_encoder.reset_queries(&query_set.raw, start..idx as u32) }; + unsafe { raw_encoder.reset_queries(query_set.raw(), start..idx as u32) }; } // We're starting a run (None, true) => { @@ -211,7 +210,7 @@ impl QuerySet { }); } - Ok(&self.raw) + Ok(self.raw()) } pub(super) fn validate_and_write_timestamp( @@ -232,7 +231,7 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now.
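// Editor's sketch: the `needs_reset` fallback below pairs with QueryResetMap, whose
// run-detection loop appears earlier in this hunk: inside a pass, resets are deferred
// and later replayed as contiguous runs (the `(Some(start), false)` arm dispatches one
// reset per run). A sketch of that batching over a per-query "used" bitmap
// (illustrative; the real map is keyed by query set id):
use std::ops::Range;

fn reset_runs(used: &[bool]) -> Vec<Range<u32>> {
    let mut runs = Vec::new();
    let mut run_start: Option<u32> = None;
    for (idx, &in_use) in used.iter().enumerate() {
        match (run_start, in_use) {
            (Some(start), false) => {
                runs.push(start..idx as u32); // end of a run: dispatch one reset
                run_start = None;
            }
            (None, true) => run_start = Some(idx as u32), // starting a run
            _ => {}
        }
    }
    if let Some(start) = run_start {
        runs.push(start..used.len() as u32); // final run extends to the end
    }
    runs
}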
if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1)); } raw_encoder.write_timestamp(query_set, query_index); } @@ -266,7 +265,8 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder + .reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1)); } raw_encoder.begin_query(query_set, query_index); } @@ -300,7 +300,7 @@ impl QuerySet { unsafe { // If we don't have a reset state tracker which can defer resets, we must reset now. if needs_reset { - raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1)); + raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1)); } raw_encoder.begin_query(query_set, query_index); } @@ -318,7 +318,7 @@ pub(super) fn end_occlusion_query( // We can unwrap here as the validity was validated when the active query was set let query_set = storage.get(query_set_id).unwrap(); - unsafe { raw_encoder.end_query(&query_set.raw, query_index) }; + unsafe { raw_encoder.end_query(query_set.raw.as_ref().unwrap(), query_index) }; Ok(()) } else { @@ -335,7 +335,7 @@ pub(super) fn end_pipeline_statistics_query( // We can unwrap here as the validity was validated when the active query was set let query_set = storage.get(query_set_id).unwrap(); - unsafe { raw_encoder.end_query(&query_set.raw, query_index) }; + unsafe { raw_encoder.end_query(query_set.raw(), query_index) }; Ok(()) } else { @@ -351,24 +351,26 @@ impl Global { query_index: u32, ) -> Result<(), QueryError> { let hub = A::hub(self); - let mut token = Token::root(); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, _) = hub.query_sets.read(&mut token); - - let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?; - let raw_encoder = cmd_buf.encoder.open(); + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::WriteTimestamp { query_set_id, query_index, }); } - let query_set = cmd_buf - .trackers + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + + let raw_encoder = encoder.open(); + + let query_set_guard = hub.query_sets.read(); + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(QueryError::InvalidQuerySet(query_set_id))?; @@ -388,17 +390,13 @@ impl Global { destination_offset: BufferAddress, ) -> Result<(), QueryError> { let hub = A::hub(self); - let mut token = Token::root(); - - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, _) = hub.buffers.read(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?; - let raw_encoder = cmd_buf.encoder.open(); + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands 
{ list.push(TraceCommand::ResolveQuerySet { query_set_id, start_query, @@ -408,22 +406,31 @@ impl Global { }); } + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let raw_encoder = encoder.open(); + if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } - - let query_set = cmd_buf - .trackers + let query_set_guard = hub.query_sets.read(); + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(QueryError::InvalidQuerySet(query_set_id))?; - let (dst_buffer, dst_pending) = cmd_buf - .trackers - .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) - .ok_or(QueryError::InvalidBuffer(destination))?; - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination) + .map_err(|_| QueryError::InvalidBuffer(destination))?; + tracker + .buffers + .set_single(dst_buffer, hal::BufferUses::COPY_DST) + .ok_or(QueryError::InvalidBuffer(destination))? + }; + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if !dst_buffer.usage.contains(wgt::BufferUsages::QUERY_RESOLVE) { return Err(ResolveError::MissingBufferUsage.into()); @@ -463,20 +470,18 @@ impl Global { } // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state. - cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( - destination, - buffer_start_offset..buffer_end_offset, - MemoryInitKind::ImplicitlyInitialized, - )); + buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( + &dst_buffer, + buffer_start_offset..buffer_end_offset, + MemoryInitKind::ImplicitlyInitialized, + )); unsafe { raw_encoder.transition_buffers(dst_barrier.into_iter()); raw_encoder.copy_query_results( - &query_set.raw, + query_set.raw(), start_query..end_query, - dst_buffer.raw.as_ref().unwrap(), + dst_buffer.raw(), destination_offset, wgt::BufferSize::new_unchecked(stride as u64), ); diff --git a/third_party/rust/wgpu-core/src/command/render.rs b/third_party/rust/wgpu-core/src/command/render.rs index 1d0f7cf717878..bda4addb13aa8 100644 --- a/third_party/rust/wgpu-core/src/command/render.rs +++ b/third_party/rust/wgpu-core/src/command/render.rs @@ -1,5 +1,6 @@ +use crate::resource::Resource; use crate::{ - binding_model::{BindError, BindGroupLayouts}, + binding_model::BindError, command::{ self, bind::Binder, @@ -16,19 +17,17 @@ use crate::{ error::{ErrorFormatter, PrettyError}, global::Global, hal_api::HalApi, - hal_label, - hub::Token, - id, + hal_label, id, identity::GlobalIdentityHandlerFactory, init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction}, pipeline::{self, PipelineFlags}, resource::{Buffer, QuerySet, Texture, TextureView, TextureViewNotRenderableReason}, storage::Storage, - track::{TextureSelector, UsageConflict, UsageScope}, + track::{TextureSelector, Tracker, UsageConflict, UsageScope}, validation::{ check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError, }, - Label, Stored, + Label, }; use arrayvec::ArrayVec; @@ -44,9 +43,13 @@ use serde::Deserialize; #[cfg(any(feature = "serial-pass", feature = "trace"))] use serde::Serialize; +use std::sync::Arc; use std::{borrow::Cow, fmt, 
iter, marker::PhantomData, mem, num::NonZeroU32, ops::Range, str}; -use super::{memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions}; +use super::{ + memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions, CommandEncoder, + QueryResetMap, +}; /// Operation to perform to the output attachment at the start of a renderpass. #[repr(C)] @@ -314,7 +317,7 @@ impl OptionalState { #[derive(Debug, Default)] struct IndexState { - bound_buffer_view: Option<(id::Valid, Range)>, + bound_buffer_view: Option<(id::BufferId, Range)>, format: Option, pipeline_format: Option, limit: u32, @@ -410,9 +413,9 @@ impl VertexState { } #[derive(Debug)] -struct State { +struct State { pipeline_flags: PipelineFlags, - binder: Binder, + binder: Binder, blend_constant: OptionalState, stencil_reference: u32, pipeline: Option, @@ -421,12 +424,8 @@ struct State { debug_scope_depth: u32, } -impl State { - fn is_ready( - &self, - indexed: bool, - bind_group_layouts: &BindGroupLayouts, - ) -> Result<(), DrawError> { +impl State { + fn is_ready(&self, indexed: bool) -> Result<(), DrawError> { // Determine how many vertex buffers have already been bound let vertex_buffer_count = self.vertex.inputs.iter().take_while(|v| v.bound).count() as u32; // Compare with the needed quantity @@ -436,7 +435,7 @@ impl State { }); } - let bind_mask = self.binder.invalid_mask(bind_group_layouts); + let bind_mask = self.binder.invalid_mask(); if bind_mask != 0 { //let (expected, provided) = self.binder.entries[index as usize].info(); return Err(DrawError::IncompatibleBindGroup { @@ -573,7 +572,7 @@ pub enum RenderPassErrorInner { }, #[error("Surface texture is dropped before the render pass is finished")] SurfaceTextureDropped, - #[error("Not enough memory left")] + #[error("Not enough memory left for render pass")] OutOfMemory, #[error("Unable to clear non-present/read-only depth")] InvalidDepthOps, @@ -693,16 +692,16 @@ where } } -struct RenderAttachment<'a> { - texture_id: &'a Stored, +struct RenderAttachment<'a, A: HalApi> { + texture: Arc>, selector: &'a TextureSelector, usage: hal::TextureUses, } -impl TextureView { - fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { +impl TextureView { + fn to_render_attachment(&self, usage: hal::TextureUses) -> RenderAttachment { RenderAttachment { - texture_id: &self.parent_id, + texture: self.parent.read().as_ref().unwrap().clone(), selector: &self.selector, usage, } @@ -716,13 +715,13 @@ struct RenderPassInfo<'a, A: HalApi> { context: RenderPassContext, usage_scope: UsageScope, /// All render attachments, including depth/stencil - render_attachments: AttachmentDataVec>, + render_attachments: AttachmentDataVec>, is_depth_read_only: bool, is_stencil_read_only: bool, extent: wgt::Extent3d, _phantom: PhantomData, - pending_discard_init_fixups: SurfacesInDiscardState, + pending_discard_init_fixups: SurfacesInDiscardState, divergent_discarded_depth_stencil_aspect: Option<(wgt::TextureAspect, &'a TextureView)>, multiview: Option, } @@ -730,27 +729,24 @@ struct RenderPassInfo<'a, A: HalApi> { impl<'a, A: HalApi> RenderPassInfo<'a, A> { fn add_pass_texture_init_actions( channel: &PassChannel, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, view: &TextureView, - texture_guard: &Storage, id::TextureId>, - pending_discard_init_fixups: &mut SurfacesInDiscardState, + pending_discard_init_fixups: &mut SurfacesInDiscardState, ) { if channel.load_op == LoadOp::Load { 
pending_discard_init_fixups.extend(texture_memory_actions.register_init_action( &TextureInitTrackerAction { - id: view.parent_id.value.0, + texture: view.parent.read().as_ref().unwrap().clone(), range: TextureInitRange::from(view.selector.clone()), // Note that this is needed even if the target is discarded, kind: MemoryInitKind::NeedsInitializedMemory, }, - texture_guard, )); } else if channel.store_op == StoreOp::Store { // Clear + Store texture_memory_actions.register_implicit_init( - view.parent_id.value, + view.parent.read().as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), - texture_guard, ); } if channel.store_op == StoreOp::Discard { @@ -758,7 +754,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { // discard right away be alright since the texture can't be used // during the pass anyways texture_memory_actions.discard(TextureSurfaceDiscard { - texture: view.parent_id.value.0, + texture: view.parent.read().as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -772,7 +768,10 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>, timestamp_writes: Option<&RenderPassTimestampWrites>, occlusion_query_set: Option, - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, + pending_query_resets: &mut QueryResetMap, view_guard: &'a Storage, id::TextureViewId>, buffer_guard: &'a Storage, id::BufferId>, texture_guard: &'a Storage, id::TextureId>, @@ -786,7 +785,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut is_depth_read_only = false; let mut is_stencil_read_only = false; - let mut render_attachments = AttachmentDataVec::::new(); + let mut render_attachments = AttachmentDataVec::>::new(); let mut discarded_surfaces = AttachmentDataVec::new(); let mut pending_discard_init_fixups = SurfacesInDiscardState::new(); let mut divergent_discarded_depth_stencil_aspect = None; @@ -866,8 +865,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut depth_stencil = None; if let Some(at) = depth_stencil_attachment { - let view: &TextureView = cmd_buf - .trackers + let view: &TextureView = trackers .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -887,17 +885,15 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { { Self::add_pass_texture_init_actions( &at.depth, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, view, - texture_guard, &mut pending_discard_init_fixups, ); } else if !ds_aspects.contains(hal::FormatAspects::DEPTH) { Self::add_pass_texture_init_actions( &at.stencil, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, view, - texture_guard, &mut pending_discard_init_fixups, ); } else { @@ -926,14 +922,11 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { at.depth.load_op == LoadOp::Load || at.stencil.load_op == LoadOp::Load; if need_init_beforehand { pending_discard_init_fixups.extend( - cmd_buf.texture_memory_actions.register_init_action( - &TextureInitTrackerAction { - id: view.parent_id.value.0, - range: TextureInitRange::from(view.selector.clone()), - kind: MemoryInitKind::NeedsInitializedMemory, - }, - texture_guard, - ), + texture_memory_actions.register_init_action(&TextureInitTrackerAction { + texture: view.parent.read().as_ref().unwrap().clone(), + range: TextureInitRange::from(view.selector.clone()), + kind: MemoryInitKind::NeedsInitializedMemory, + }), ); } @@ -947,10 +940,9 @@ 
impl<'a, A: HalApi> RenderPassInfo<'a, A> { // (possible optimization: Delay and potentially drop this zeroing) if at.depth.store_op != at.stencil.store_op { if !need_init_beforehand { - cmd_buf.texture_memory_actions.register_implicit_init( - view.parent_id.value, + texture_memory_actions.register_implicit_init( + view.parent.read().as_ref().unwrap(), TextureInitRange::from(view.selector.clone()), - texture_guard, ); } divergent_discarded_depth_stencil_aspect = Some(( @@ -964,7 +956,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { } else if at.depth.store_op == StoreOp::Discard { // Both are discarded using the regular path. discarded_surfaces.push(TextureSurfaceDiscard { - texture: view.parent_id.value.0, + texture: view.parent.read().as_ref().unwrap().clone(), mip_level: view.selector.mips.start, layer: view.selector.layers.start, }); @@ -988,7 +980,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { depth_stencil = Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: &view.raw, + view: view.raw(), usage, }, depth_ops: at.depth.hal_ops(), @@ -1004,8 +996,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { colors.push(None); continue; }; - let color_view: &TextureView = cmd_buf - .trackers + let color_view: &TextureView = trackers .views .add_single(view_guard, at.view) .ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?; @@ -1030,9 +1021,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { Self::add_pass_texture_init_actions( &at.channel, - &mut cmd_buf.texture_memory_actions, + texture_memory_actions, color_view, - texture_guard, &mut pending_discard_init_fixups, ); render_attachments @@ -1040,8 +1030,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { let mut hal_resolve_target = None; if let Some(resolve_target) = at.resolve_target { - let resolve_view: &TextureView = cmd_buf - .trackers + let resolve_view: &TextureView = trackers .views .add_single(view_guard, resolve_target) .ok_or(RenderPassErrorInner::InvalidAttachment(resolve_target))?; @@ -1092,23 +1081,22 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }); } - cmd_buf.texture_memory_actions.register_implicit_init( - resolve_view.parent_id.value, + texture_memory_actions.register_implicit_init( + resolve_view.parent.read().as_ref().unwrap(), TextureInitRange::from(resolve_view.selector.clone()), - texture_guard, ); render_attachments .push(resolve_view.to_render_attachment(hal::TextureUses::COLOR_TARGET)); hal_resolve_target = Some(hal::Attachment { - view: &resolve_view.raw, + view: resolve_view.raw(), usage: hal::TextureUses::COLOR_TARGET, }); } colors.push(Some(hal::ColorAttachment { target: hal::Attachment { - view: &color_view.raw, + view: color_view.raw(), usage: hal::TextureUses::COLOR_TARGET, }, resolve_target: hal_resolve_target, @@ -1145,25 +1133,20 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }; let timestamp_writes = if let Some(tw) = timestamp_writes { - let query_set = cmd_buf - .trackers + let query_set = trackers .query_sets .add_single(query_set_guard, tw.query_set) .ok_or(RenderPassErrorInner::InvalidQuerySet(tw.query_set))?; if let Some(index) = tw.beginning_of_pass_write_index { - cmd_buf - .pending_query_resets - .use_query_set(tw.query_set, query_set, index); + pending_query_resets.use_query_set(tw.query_set, query_set, index); } if let Some(index) = tw.end_of_pass_write_index { - cmd_buf - .pending_query_resets - .use_query_set(tw.query_set, query_set, index); + pending_query_resets.use_query_set(tw.query_set, query_set, index); } Some(hal::RenderPassTimestampWrites { - query_set: 
&query_set.raw, + query_set: query_set.raw.as_ref().unwrap(), beginning_of_pass_write_index: tw.beginning_of_pass_write_index, end_of_pass_write_index: tw.end_of_pass_write_index, }) @@ -1172,13 +1155,12 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { }; let occlusion_query_set = if let Some(occlusion_query_set) = occlusion_query_set { - let query_set = cmd_buf - .trackers + let query_set = trackers .query_sets .add_single(query_set_guard, occlusion_query_set) .ok_or(RenderPassErrorInner::InvalidQuerySet(occlusion_query_set))?; - Some(&query_set.raw) + Some(query_set.raw.as_ref().unwrap()) } else { None }; @@ -1194,7 +1176,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { occlusion_query_set, }; unsafe { - cmd_buf.encoder.raw.begin_render_pass(&hal_desc); + encoder.raw.begin_render_pass(&hal_desc); }; Ok(Self { @@ -1214,31 +1196,21 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { fn finish( mut self, raw: &mut A::CommandEncoder, - texture_guard: &Storage, id::TextureId>, - ) -> Result<(UsageScope, SurfacesInDiscardState), RenderPassErrorInner> { + ) -> Result<(UsageScope, SurfacesInDiscardState), RenderPassErrorInner> { profiling::scope!("RenderPassInfo::finish"); unsafe { raw.end_render_pass(); } for ra in self.render_attachments { - if !texture_guard.contains(ra.texture_id.value.0) { - return Err(RenderPassErrorInner::SurfaceTextureDropped); - } - let texture = &texture_guard[ra.texture_id.value]; + let texture = &ra.texture; check_texture_usage(texture.desc.usage, TextureUsages::RENDER_ATTACHMENT)?; // the tracker set of the pass is always in "extend" mode unsafe { self.usage_scope .textures - .merge_single( - texture_guard, - ra.texture_id.value, - Some(ra.selector.clone()), - &ra.texture_id.ref_count, - ra.usage, - ) + .merge_single(texture, Some(ra.selector.clone()), ra.usage) .map_err(UsageConflict::from)? }; } @@ -1271,7 +1243,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> { color_attachments: &[], depth_stencil_attachment: Some(hal::DepthStencilAttachment { target: hal::Attachment { - view: &view.raw, + view: view.raw(), usage: hal::TextureUses::DEPTH_STENCIL_WRITE, }, depth_ops, @@ -1334,27 +1306,14 @@ impl Global { let init_scope = PassErrorScope::Pass(encoder_id); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); let (scope, pending_discard_init_fixups) = { - let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); - - // Spell out the type, to placate rust-analyzer. - // https://github.com/rust-lang/rust-analyzer/issues/12247 - let cmd_buf: &mut CommandBuffer = - CommandBuffer::get_encoder_mut(&mut *cmb_guard, encoder_id) - .map_pass_err(init_scope)?; - - // We automatically keep extending command buffers over time, and because - // we want to insert a command buffer _before_ what we're about to record, - // we need to make sure to close the previous one. - cmd_buf.encoder.close(); - // We will reset this to `Recording` if we succeed, acts as a fail-safe. 
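
The removed lines above and their replacements below both rely on a poisoning pattern: the encoder is marked `Error` before any fallible work begins, and only restored to `Recording` once the whole pass has been recorded. A minimal sketch of that contract, with a hypothetical `Status` enum standing in for wgpu's `CommandEncoderStatus`:

```rust
#[derive(Debug, PartialEq)]
enum Status {
    Recording,
    Error,
}

fn record_pass(status: &mut Status, steps: &[Result<(), String>]) -> Result<(), String> {
    // Fail-safe: assume the worst before doing any work. If any step below
    // bails out with `?`, the encoder stays poisoned in the Error state.
    *status = Status::Error;
    for step in steps {
        step.clone()?;
    }
    // Every step succeeded, so recording may continue.
    *status = Status::Recording;
    Ok(())
}

fn main() {
    let mut status = Status::Recording;
    assert!(record_pass(&mut status, &[Err("bad attachment".into())]).is_err());
    // An early return left the encoder unusable instead of half-recorded.
    assert_eq!(status, Status::Error);
}
```
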
- cmd_buf.status = CommandEncoderStatus::Error; + let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(init_scope)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunRenderPass { base: BasePass::from_ref(base), target_colors: color_attachments.to_vec(), @@ -1364,23 +1323,33 @@ impl Global { }); } - let device_id = cmd_buf.device_id.value; - - let device = &device_guard[device_id]; + let device = &cmd_buf.device; if !device.is_valid() { return Err(DeviceError::Lost).map_pass_err(init_scope); } - cmd_buf.encoder.open_pass(label); - - let (bundle_guard, mut token) = hub.render_bundles.read(&mut token); - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (render_pipeline_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (view_guard, _) = hub.texture_views.read(&mut token); + + let encoder = &mut cmd_buf_data.encoder; + let status = &mut cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + let pending_query_resets = &mut cmd_buf_data.pending_query_resets; + + // We automatically keep extending command buffers over time, and because + // we want to insert a command buffer _before_ what we're about to record, + // we need to make sure to close the previous one. + encoder.close(); + // We will reset this to `Recording` if we succeed, acts as a fail-safe. 
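
The arcanized path replaces the old `Token` chain with a single lock on the command buffer's data, which is then destructured into disjoint `&mut` borrows (`encoder`, `status`, `tracker`, ...). A self-contained sketch of that pattern, using `std::sync::Mutex` and invented field types (the real code uses `parking_lot` and far richer state):

```rust
use std::sync::{Arc, Mutex};

struct CommandBufferData {
    commands: Vec<String>,
    status: u8,
}

struct CommandBuffer {
    // `None` once the buffer has been consumed by submission.
    data: Mutex<Option<CommandBufferData>>,
}

fn encode(cmd_buf: &Arc<CommandBuffer>) {
    // Lock once for the whole recording step...
    let mut guard = cmd_buf.data.lock().unwrap();
    let data = guard.as_mut().unwrap();
    // ...then split the guard into the disjoint &mut fields the pass needs,
    // so the borrow checker allows using them side by side.
    let commands = &mut data.commands;
    let status = &mut data.status;
    commands.push("begin_render_pass".to_owned());
    *status = 1;
}

fn main() {
    let cmd_buf = Arc::new(CommandBuffer {
        data: Mutex::new(Some(CommandBufferData { commands: Vec::new(), status: 0 })),
    });
    encode(&cmd_buf);
    assert_eq!(cmd_buf.data.lock().unwrap().as_ref().unwrap().commands.len(), 1);
}
```
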
+ *status = CommandEncoderStatus::Error; + encoder.open_pass(label); + + let bundle_guard = hub.render_bundles.read(); + let bind_group_guard = hub.bind_groups.read(); + let render_pipeline_guard = hub.render_pipelines.read(); + let query_set_guard = hub.query_sets.read(); + let buffer_guard = hub.buffers.read(); + let texture_guard = hub.textures.read(); + let view_guard = hub.texture_views.read(); log::trace!( "Encoding render pass begin in command buffer {:?}", @@ -1394,7 +1363,10 @@ impl Global { depth_stencil_attachment, timestamp_writes, occlusion_query_set_id, - cmd_buf, + encoder, + tracker, + texture_memory_actions, + pending_query_resets, &*view_guard, &*buffer_guard, &*texture_guard, @@ -1402,7 +1374,7 @@ impl Global { ) .map_pass_err(init_scope)?; - cmd_buf.trackers.set_size( + tracker.set_size( Some(&*buffer_guard), Some(&*texture_guard), Some(&*view_guard), @@ -1414,7 +1386,7 @@ impl Global { Some(&*query_set_guard), ); - let raw = &mut cmd_buf.encoder.raw; + let raw = &mut encoder.raw; let mut state = State { pipeline_flags: PipelineFlags::empty(), @@ -1457,14 +1429,13 @@ impl Global { ); dynamic_offset_count += num_dynamic_offsets as usize; - let bind_group: &crate::binding_model::BindGroup = cmd_buf - .trackers + let bind_group = tracker .bind_groups .add_single(&*bind_group_guard, bind_group_id) .ok_or(RenderCommandError::InvalidBindGroup(bind_group_id)) .map_pass_err(scope)?; - if bind_group.device_id.value != device_id { + if bind_group.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1475,49 +1446,48 @@ impl Global { // merge the resource tracker in unsafe { info.usage_scope - .merge_bind_group(&*texture_guard, &bind_group.used) + .merge_bind_group(&bind_group.used) .map_pass_err(scope)?; } //Note: stateless trackers are not merged: the lifetime reference // is held to the bind group itself. 
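
The note just above is the heart of the arcanization work: stateless resources stay alive because the tracker clones their `Arc`, not because an id is pinned in a registry. A toy sketch of that lifetime contract, with hypothetical types:

```rust
use std::sync::Arc;

struct BindGroup {
    label: String,
}

/// Toy "stateless" tracker: it records no state transitions, it only keeps
/// the resources it saw alive until the command buffer is done with them.
#[derive(Default)]
struct StatelessTracker {
    kept_alive: Vec<Arc<BindGroup>>,
}

impl StatelessTracker {
    fn add_single(&mut self, bind_group: &Arc<BindGroup>) -> Arc<BindGroup> {
        self.kept_alive.push(bind_group.clone());
        bind_group.clone()
    }
}

fn main() {
    let mut tracker = StatelessTracker::default();
    let bg = Arc::new(BindGroup { label: "globals".into() });
    let held = tracker.add_single(&bg);
    drop(bg); // the tracker's clone keeps the bind group alive
    assert_eq!(held.label, "globals");
    assert_eq!(Arc::strong_count(&held), 2); // `held` + the tracker's copy
}
```
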
- cmd_buf.buffer_memory_init_actions.extend( - bind_group.used_buffer_ranges.iter().filter_map(|action| { - match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), - Err(_) => None, - } - }), + buffer_memory_init_actions.extend( + bind_group + .used_buffer_ranges + .read() + .iter() + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) + }), ); - for action in bind_group.used_texture_ranges.iter() { - info.pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), - ); + for action in bind_group.used_texture_ranges.read().iter() { + info.pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } - let pipeline_layout_id = state.binder.pipeline_layout_id; - let entries = state.binder.assign_group( - index as usize, - id::Valid(bind_group_id), - bind_group, - &temp_offsets, - ); - if !entries.is_empty() { - let pipeline_layout = - &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw; + let pipeline_layout = state.binder.pipeline_layout.clone(); + let entries = + state + .binder + .assign_group(index as usize, bind_group, &temp_offsets); + if !entries.is_empty() && pipeline_layout.is_some() { + let pipeline_layout = pipeline_layout.as_ref().unwrap().raw(); for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - - unsafe { - raw.set_bind_group( - pipeline_layout, - index + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline_layout, + index + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } @@ -1528,14 +1498,13 @@ impl Global { let scope = PassErrorScope::SetPipelineRender(pipeline_id); state.pipeline = Some(pipeline_id); - let pipeline: &pipeline::RenderPipeline = cmd_buf - .trackers + let pipeline: &pipeline::RenderPipeline = tracker .render_pipelines .add_single(&*render_pipeline_guard, pipeline_id) .ok_or(RenderCommandError::InvalidPipeline(pipeline_id)) .map_pass_err(scope)?; - if pipeline.device_id.value != device_id { + if pipeline.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1563,7 +1532,7 @@ impl Global { .require(pipeline.flags.contains(PipelineFlags::BLEND_CONSTANT)); unsafe { - raw.set_render_pipeline(&pipeline.raw); + raw.set_render_pipeline(pipeline.raw()); } if pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE) { @@ -1573,33 +1542,37 @@ impl Global { } // Rebind resource - if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { - let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; - + if state.binder.pipeline_layout.is_none() + || !state + .binder + .pipeline_layout + .as_ref() + .unwrap() + .is_equal(&pipeline.layout) + { let (start_index, entries) = state.binder.change_pipeline_layout( - &*pipeline_layout_guard, - pipeline.layout_id.value, + &pipeline.layout, &pipeline.late_sized_buffer_groups, ); if !entries.is_empty() { for (i, e) in entries.iter().enumerate() { - let raw_bg = - &bind_group_guard[e.group_id.as_ref().unwrap().value].raw; - - unsafe { - raw.set_bind_group( - &pipeline_layout.raw, - start_index as u32 + i as u32, - raw_bg, - &e.dynamic_offsets, - ); + if let Some(group) = e.group.as_ref() { + let raw_bg = group.raw(); + unsafe { + raw.set_bind_group( + pipeline.layout.raw(), + 
start_index as u32 + i as u32, + raw_bg, + &e.dynamic_offsets, + ); + } } } } // Clear push constant ranges let non_overlapping = super::bind::compute_nonoverlapping_ranges( - &pipeline_layout.push_constant_ranges, + &pipeline.layout.push_constant_ranges, ); for range in non_overlapping { let offset = range.range.start; @@ -1609,7 +1582,7 @@ impl Global { size_bytes, |clear_offset, clear_data| unsafe { raw.set_push_constants( - &pipeline_layout.raw, + pipeline.layout.raw(), range.stages, clear_offset, clear_data, @@ -1651,13 +1624,13 @@ impl Global { log::trace!("RenderPass::set_index_buffer {buffer_id:?}"); let scope = PassErrorScope::SetIndexBuffer(buffer_id); - let buffer: &Buffer = info + let buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX) .map_pass_err(scope)?; - if buffer.device_id.value != device_id { + if buffer.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1673,14 +1646,14 @@ impl Global { Some(s) => offset + s.get(), None => buffer.size, }; - state.index.bound_buffer_view = Some((id::Valid(buffer_id), offset..end)); + state.index.bound_buffer_view = Some((buffer_id, offset..end)); state.index.format = Some(index_format); state.index.update_limit(); - cmd_buf.buffer_memory_init_actions.extend( - buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend( + buffer.initialization_status.read().create_action( + buffer, offset..end, MemoryInitKind::NeedsInitializedMemory, ), @@ -1704,13 +1677,13 @@ impl Global { log::trace!("RenderPass::set_vertex_buffer {slot} {buffer_id:?}"); let scope = PassErrorScope::SetVertexBuffer(buffer_id); - let buffer: &Buffer = info + let buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX) .map_pass_err(scope)?; - if buffer.device_id.value != device_id { + if buffer.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -1745,9 +1718,9 @@ impl Global { }; vertex_state.bound = true; - cmd_buf.buffer_memory_init_actions.extend( - buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend( + buffer.initialization_status.read().create_action( + buffer, offset..(offset + vertex_state.total_size), MemoryInitKind::NeedsInitializedMemory, ), @@ -1846,12 +1819,12 @@ impl Global { let data_slice = &base.push_constant_data[(values_offset as usize)..values_end_offset]; - let pipeline_layout_id = state + let pipeline_layout = state .binder - .pipeline_layout_id + .pipeline_layout + .as_ref() .ok_or(DrawError::MissingPipeline) .map_pass_err(scope)?; - let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id]; pipeline_layout .validate_push_constant_ranges(stages, offset, end_offset_bytes) @@ -1859,7 +1832,12 @@ impl Global { .map_pass_err(scope)?; unsafe { - raw.set_push_constants(&pipeline_layout.raw, stages, offset, data_slice) + raw.set_push_constants( + pipeline_layout.raw(), + stages, + offset, + data_slice, + ) } } RenderCommand::SetScissor(ref rect) => { @@ -1898,9 +1876,7 @@ impl Global { indirect: false, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let last_vertex = first_vertex + vertex_count; let vertex_limit = state.vertex.vertex_limit; @@ -1942,9 +1918,7 @@ impl Global { indirect: false, pipeline: state.pipeline, }; - state - .is_ready::(indexed, 
&*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; //TODO: validate that base_vertex + max_index() is // within the provided range @@ -1991,9 +1965,7 @@ impl Global { indirect: true, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let stride = match indexed { false => mem::size_of::(), @@ -2009,7 +1981,7 @@ impl Global { .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = info + let indirect_buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -2035,9 +2007,9 @@ impl Global { .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( + indirect_buffer, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -2067,9 +2039,7 @@ impl Global { indirect: true, pipeline: state.pipeline, }; - state - .is_ready::(indexed, &*bind_group_layout_guard) - .map_pass_err(scope)?; + state.is_ready(indexed).map_pass_err(scope)?; let stride = match indexed { false => mem::size_of::(), @@ -2083,7 +2053,7 @@ impl Global { .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION) .map_pass_err(scope)?; - let indirect_buffer: &Buffer = info + let indirect_buffer = info .usage_scope .buffers .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT) @@ -2096,7 +2066,7 @@ impl Global { .ok_or(RenderCommandError::DestroyedBuffer(buffer_id)) .map_pass_err(scope)?; - let count_buffer: &Buffer = info + let count_buffer = info .usage_scope .buffers .merge_single( @@ -2123,9 +2093,9 @@ impl Global { }) .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - indirect_buffer.initialization_status.create_action( - buffer_id, + buffer_memory_init_actions.extend( + indirect_buffer.initialization_status.read().create_action( + indirect_buffer, offset..end_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -2141,9 +2111,9 @@ impl Global { }) .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( - count_buffer.initialization_status.create_action( - count_buffer_id, + buffer_memory_init_actions.extend( + count_buffer.initialization_status.read().create_action( + count_buffer, count_buffer_offset..end_count_offset, MemoryInitKind::NeedsInitializedMemory, ), @@ -2224,8 +2194,7 @@ impl Global { .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES) .map_pass_err(scope)?; - let query_set = cmd_buf - .trackers + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2236,7 +2205,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), ) .map_pass_err(scope)?; } @@ -2248,8 +2217,7 @@ impl Global { .ok_or(RenderPassErrorInner::MissingOcclusionQuerySet) .map_pass_err(scope)?; - let query_set = cmd_buf - .trackers + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2260,7 +2228,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), &mut active_query, ) 
.map_pass_err(scope)?; @@ -2279,8 +2247,7 @@ impl Global { log::trace!("RenderPass::begin_pipeline_statistics_query {query_set_id:?} {query_index}"); let scope = PassErrorScope::BeginPipelineStatisticsQuery; - let query_set = cmd_buf - .trackers + let query_set = tracker .query_sets .add_single(&*query_set_guard, query_set_id) .ok_or(RenderCommandError::InvalidQuerySet(query_set_id)) @@ -2291,7 +2258,7 @@ impl Global { raw, query_set_id, query_index, - Some(&mut cmd_buf.pending_query_resets), + Some(&mut cmd_buf_data.pending_query_resets), &mut active_query, ) .map_pass_err(scope)?; @@ -2306,14 +2273,13 @@ impl Global { RenderCommand::ExecuteBundle(bundle_id) => { log::trace!("RenderPass::execute_bundle {bundle_id:?}"); let scope = PassErrorScope::ExecuteBundle; - let bundle: &command::RenderBundle = cmd_buf - .trackers + let bundle: &command::RenderBundle = tracker .bundles .add_single(&*bundle_guard, bundle_id) .ok_or(RenderCommandError::InvalidRenderBundle(bundle_id)) .map_pass_err(scope)?; - if bundle.device_id.value != device_id { + if bundle.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice).map_pass_err(scope); } @@ -2339,48 +2305,39 @@ impl Global { .map_pass_err(scope); } - cmd_buf.buffer_memory_init_actions.extend( + buffer_memory_init_actions.extend( bundle .buffer_memory_init_actions .iter() - .filter_map(|action| match buffer_guard.get(action.id) { - Ok(buffer) => buffer.initialization_status.check_action(action), - Err(_) => None, + .filter_map(|action| { + action + .buffer + .initialization_status + .read() + .check_action(action) }), ); for action in bundle.texture_memory_init_actions.iter() { - info.pending_discard_init_fixups.extend( - cmd_buf - .texture_memory_actions - .register_init_action(action, &texture_guard), - ); + info.pending_discard_init_fixups + .extend(texture_memory_actions.register_init_action(action)); } - unsafe { - bundle.execute( - raw, - &*pipeline_layout_guard, - &*bind_group_guard, - &*render_pipeline_guard, - &*buffer_guard, - ) - } - .map_err(|e| match e { - ExecutionError::DestroyedBuffer(id) => { - RenderCommandError::DestroyedBuffer(id) - } - ExecutionError::Unimplemented(what) => { - RenderCommandError::Unimplemented(what) - } - }) - .map_pass_err(scope)?; + unsafe { bundle.execute(raw) } + .map_err(|e| match e { + ExecutionError::DestroyedBuffer(id) => { + RenderCommandError::DestroyedBuffer(id) + } + ExecutionError::Unimplemented(what) => { + RenderCommandError::Unimplemented(what) + } + }) + .map_pass_err(scope)?; unsafe { info.usage_scope - .merge_render_bundle(&*texture_guard, &bundle.used) + .merge_render_bundle(&bundle.used) .map_pass_err(scope)?; - cmd_buf - .trackers + tracker .add_from_render_bundle(&bundle.used) .map_pass_err(scope)?; }; @@ -2391,50 +2348,47 @@ impl Global { log::trace!("Merging renderpass into cmd_buf {:?}", encoder_id); let (trackers, pending_discard_init_fixups) = - info.finish(raw, &*texture_guard).map_pass_err(init_scope)?; + info.finish(raw).map_pass_err(init_scope)?; - cmd_buf.encoder.close(); + encoder.close(); (trackers, pending_discard_init_fixups) }; - let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); + let query_set_guard = hub.query_sets.read(); + + let cmd_buf = hub.command_buffers.get(encoder_id).unwrap(); + let mut cmd_buf_data = cmd_buf.data.lock(); + let 
cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + + let encoder = &mut cmd_buf_data.encoder; + let status = &mut cmd_buf_data.status; + let tracker = &mut cmd_buf_data.trackers; - let cmd_buf = cmb_guard.get_mut(encoder_id).unwrap(); { - let transit = cmd_buf.encoder.open(); + let transit = encoder.open(); fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, - &texture_guard, - &mut cmd_buf.trackers.textures, - &device_guard[cmd_buf.device_id.value], + &mut tracker.textures, + &cmd_buf.device, ); - cmd_buf + cmd_buf_data .pending_query_resets .reset_queries( transit, &query_set_guard, - cmd_buf.device_id.value.0.backend(), + cmd_buf.device.info.id().backend(), ) .map_err(RenderCommandError::InvalidQuerySet) .map_pass_err(PassErrorScope::QueryReset)?; - super::CommandBuffer::insert_barriers_from_scope( - transit, - &mut cmd_buf.trackers, - &scope, - &*buffer_guard, - &*texture_guard, - ); + super::CommandBuffer::insert_barriers_from_scope(transit, tracker, &scope); } - cmd_buf.status = CommandEncoderStatus::Recording; - cmd_buf.encoder.close_and_swap(); + *status = CommandEncoderStatus::Recording; + encoder.close_and_swap(); Ok(()) } diff --git a/third_party/rust/wgpu-core/src/command/transfer.rs b/third_party/rust/wgpu-core/src/command/transfer.rs index 86c52d11ee0f9..dbb5be98de35a 100644 --- a/third_party/rust/wgpu-core/src/command/transfer.rs +++ b/third_party/rust/wgpu-core/src/command/transfer.rs @@ -7,16 +7,15 @@ use crate::{ error::{ErrorFormatter, PrettyError}, global::Global, hal_api::HalApi, - hub::Token, - id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid}, + id::{BufferId, CommandEncoderId, DeviceId, TextureId}, identity::GlobalIdentityHandlerFactory, init_tracker::{ has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange, TextureInitTrackerAction, }, - resource::{Texture, TextureErrorDimension}, + resource::{Resource, Texture, TextureErrorDimension}, storage::Storage, - track::TextureSelector, + track::{TextureSelector, Tracker}, }; use arrayvec::ArrayVec; @@ -26,6 +25,8 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages}; use std::iter; +use super::{memory_init::CommandBufferTextureMemoryActions, CommandEncoder}; + pub type ImageCopyBuffer = wgt::ImageCopyBuffer; pub type ImageCopyTexture = wgt::ImageCopyTexture; pub type ImageCopyTextureTagged = wgt::ImageCopyTextureTagged; @@ -185,7 +186,7 @@ pub enum CopyError { Transfer(#[from] TransferError), } -pub(crate) fn extract_texture_selector( +pub(crate) fn extract_texture_selector( copy_texture: &ImageCopyTexture, copy_size: &Extent3d, texture: &Texture, @@ -439,14 +440,17 @@ pub(crate) fn validate_texture_copy_range( fn handle_texture_init( init_kind: MemoryInitKind, - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, texture_guard: &Storage, TextureId>, ) { + let texture = texture_guard.get(copy_texture.texture).unwrap(); let init_action = TextureInitTrackerAction { - id: copy_texture.texture, + texture: texture.clone(), range: TextureInitRange { mip_range: copy_texture.mip_level..copy_texture.mip_level + 1, layer_range: copy_texture.origin.z @@ -456,25 +460,22 @@ fn handle_texture_init( }; // Register the init action. 
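
`register_init_action` both records a pending initialization and reports any surfaces that must be cleared immediately because an earlier pass discarded them; the `immediate_inits` loop below consumes exactly that report. A toy model of the contract (invented types, far simpler than wgpu's real init tracker):

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Surface {
    mip: u32,
    layer: u32,
}

#[derive(Default)]
struct TextureMemoryActions {
    // Surfaces whose contents were thrown away by an earlier pass.
    discarded: HashSet<Surface>,
}

impl TextureMemoryActions {
    fn discard(&mut self, s: Surface) {
        self.discarded.insert(s);
    }

    /// Record that `needed` must contain initialized memory, and return any
    /// surfaces that have to be zero-cleared *right now* because a prior
    /// discard invalidated them.
    fn register_init_action(&mut self, needed: &[Surface]) -> Vec<Surface> {
        needed
            .iter()
            .filter(|s| self.discarded.remove(*s))
            .copied()
            .collect()
    }
}

fn main() {
    let mut actions = TextureMemoryActions::default();
    actions.discard(Surface { mip: 0, layer: 3 });
    let immediate = actions.register_init_action(&[Surface { mip: 0, layer: 3 }]);
    assert_eq!(immediate, vec![Surface { mip: 0, layer: 3 }]);
}
```
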
- let immediate_inits = cmd_buf - .texture_memory_actions - .register_init_action(&{ init_action }, texture_guard); + let immediate_inits = texture_memory_actions.register_init_action(&{ init_action }); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); for init in immediate_inits { clear_texture( - texture_guard, - Valid(init.texture), + &init.texture, TextureInitRange { mip_range: init.mip_level..(init.mip_level + 1), layer_range: init.layer..(init.layer + 1), }, cmd_buf_raw, - &mut cmd_buf.trackers.textures, + &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .unwrap(); } @@ -486,7 +487,9 @@ fn handle_texture_init( /// Ensure the source texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_src_texture_init( - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -498,7 +501,9 @@ fn handle_src_texture_init( handle_texture_init( MemoryInitKind::NeedsInitializedMemory, - cmd_buf, + encoder, + trackers, + texture_memory_actions, device, source, copy_size, @@ -512,7 +517,9 @@ fn handle_src_texture_init( /// Ensure the destination texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_dst_texture_init( - cmd_buf: &mut CommandBuffer, + encoder: &mut CommandEncoder, + trackers: &mut Tracker, + texture_memory_actions: &mut CommandBufferTextureMemoryActions, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -538,7 +545,9 @@ fn handle_dst_texture_init( handle_texture_init( dst_init_kind, - cmd_buf, + encoder, + trackers, + texture_memory_actions, device, destination, copy_size, @@ -563,20 +572,18 @@ impl Global { return Err(TransferError::SameSourceDestinationBuffer.into()); } let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, _) = hub.buffers.read(&mut token); + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let device = &device_guard[cmd_buf.device_id.value]; + let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToBuffer { src: source, src_offset: source_offset, @@ -586,11 +593,17 @@ impl Global { }); } - let (src_buffer, src_pending) = cmd_buf - .trackers - .buffers - .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC) - .ok_or(TransferError::InvalidBuffer(source))?; + let (src_buffer, src_pending) = { + let buffer_guard = hub.buffers.read(); + let src_buffer = buffer_guard + .get(source) + .map_err(|_| TransferError::InvalidBuffer(source))?; + 
cmd_buf_data + .trackers + .buffers + .set_single(src_buffer, hal::BufferUses::COPY_SRC) + .ok_or(TransferError::InvalidBuffer(source))? + }; let src_raw = src_buffer .raw .as_ref() @@ -599,13 +612,19 @@ impl Global { return Err(TransferError::MissingCopySrcUsageFlag.into()); } // expecting only a single barrier - let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); - - let (dst_buffer, dst_pending) = cmd_buf - .trackers - .buffers - .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST) - .ok_or(TransferError::InvalidBuffer(destination))?; + let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer)); + + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination) + .map_err(|_| TransferError::InvalidBuffer(destination))?; + cmd_buf_data + .trackers + .buffers + .set_single(dst_buffer, hal::BufferUses::COPY_DST) + .ok_or(TransferError::InvalidBuffer(destination))? + }; let dst_raw = dst_buffer .raw .as_ref() @@ -613,7 +632,7 @@ impl Global { if !dst_buffer.usage.contains(BufferUsages::COPY_DST) { return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into()); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if size % wgt::COPY_BUFFER_ALIGNMENT != 0 { return Err(TransferError::UnalignedCopySize(size).into()); @@ -672,27 +691,27 @@ impl Global { } // Make sure source is initialized memory and mark dest as initialized. - cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( - destination, + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, destination_offset..(destination_offset + size), MemoryInitKind::ImplicitlyInitialized, - )); - cmd_buf - .buffer_memory_init_actions - .extend(src_buffer.initialization_status.create_action( - source, + ), + ); + cmd_buf_data.buffer_memory_init_actions.extend( + src_buffer.initialization_status.read().create_action( + &src_buffer, source_offset..(source_offset + size), MemoryInitKind::NeedsInitializedMemory, - )); + ), + ); let region = hal::BufferCopy { src_offset: source_offset, dst_offset: destination_offset, size: wgt::BufferSize::new(size).unwrap(), }; - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_buffers(src_barrier.into_iter().chain(dst_barrier)); cmd_buf_raw.copy_buffer_to_buffer(src_raw, dst_raw, iter::once(region)); @@ -710,21 +729,18 @@ impl Global { profiling::scope!("CommandEncoder::copy_buffer_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); - let device = &device_guard[cmd_buf.device_id.value]; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = 
cmd_buf_data.as_mut().unwrap(); + #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToTexture { src: *source, dst: *destination, @@ -732,6 +748,13 @@ impl Global { }); } + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + + let texture_guard = hub.textures.read(); + if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_buffer_to_texture of size 0"); return Ok(()); @@ -753,13 +776,26 @@ impl Global { // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. - handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; + handle_dst_texture_init( + encoder, + tracker, + texture_memory_actions, + device, + destination, + copy_size, + &texture_guard, + )?; - let (src_buffer, src_pending) = cmd_buf - .trackers - .buffers - .set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC) - .ok_or(TransferError::InvalidBuffer(source.buffer))?; + let (src_buffer, src_pending) = { + let buffer_guard = hub.buffers.read(); + let src_buffer = buffer_guard + .get(source.buffer) + .map_err(|_| TransferError::InvalidBuffer(source.buffer))?; + tracker + .buffers + .set_single(src_buffer, hal::BufferUses::COPY_SRC) + .ok_or(TransferError::InvalidBuffer(source.buffer))? + }; let src_raw = src_buffer .raw .as_ref() @@ -767,20 +803,16 @@ impl Global { if !src_buffer.usage.contains(BufferUsages::COPY_SRC) { return Err(TransferError::MissingCopySrcUsageFlag.into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer)); + let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer)); - let dst_pending = cmd_buf - .trackers + let dst_pending = tracker .textures - .set_single( - dst_texture, - destination.texture, - dst_range, - hal::TextureUses::COPY_DST, - ) + .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let dst_raw = dst_texture - .inner + let dst_inner = dst_texture.inner(); + let dst_raw = dst_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) { @@ -788,7 +820,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())); if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -818,13 +850,11 @@ impl Global { .map_err(TransferError::from)?; } - cmd_buf - .buffer_memory_init_actions - .extend(src_buffer.initialization_status.create_action( - source.buffer, - source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::NeedsInitializedMemory, - )); + buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( + &src_buffer, + source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::NeedsInitializedMemory, + )); let regions = 
(0..array_layer_count).map(|rel_array_layer| { let mut texture_base = dst_base.clone(); @@ -838,7 +868,7 @@ impl Global { } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); unsafe { cmd_buf_raw.transition_textures(dst_barrier.into_iter()); cmd_buf_raw.transition_buffers(src_barrier.into_iter()); @@ -857,27 +887,30 @@ impl Global { profiling::scope!("CommandEncoder::copy_texture_to_buffer"); let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, _) = hub.textures.read(&mut token); - let device = &device_guard[cmd_buf.device_id.value]; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let device = &cmd_buf.device; if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToBuffer { src: *source, dst: *destination, size: *copy_size, }); } + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + + let texture_guard = hub.textures.read(); if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); @@ -896,20 +929,24 @@ impl Global { // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
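
Each buffer/texture copy in this file expands a multi-layer copy into one HAL region per array layer, stepping the buffer offset by `bytes_per_row * rows_per_image` for each successive layer. The arithmetic, sketched with plain integers and a hypothetical region type:

```rust
#[derive(Debug, PartialEq)]
struct BufferTextureCopy {
    buffer_offset: u64,
    array_layer: u32,
}

fn layer_regions(
    base_offset: u64,
    bytes_per_row: u64,
    rows_per_image: u64,
    base_layer: u32,
    layer_count: u32,
) -> Vec<BufferTextureCopy> {
    (0..layer_count)
        .map(|rel| BufferTextureCopy {
            // Each successive layer starts one full image further into the buffer.
            buffer_offset: base_offset + rel as u64 * bytes_per_row * rows_per_image,
            array_layer: base_layer + rel,
        })
        .collect()
}

fn main() {
    let regions = layer_regions(256, 1024, 64, 2, 3);
    assert_eq!(
        regions[2],
        BufferTextureCopy { buffer_offset: 256 + 2 * 1024 * 64, array_layer: 4 }
    );
}
```
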
- handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; + handle_src_texture_init( + encoder, + tracker, + texture_memory_actions, + device, + source, + copy_size, + &texture_guard, + )?; - let src_pending = cmd_buf - .trackers + let src_pending = tracker .textures - .set_single( - src_texture, - source.texture, - src_range, - hal::TextureUses::COPY_SRC, - ) + .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; - let src_raw = src_texture - .inner + let src_inner = src_texture.inner(); + let src_raw = src_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) { @@ -928,17 +965,18 @@ impl Global { } .into()); } - let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture)); - - let (dst_buffer, dst_pending) = cmd_buf - .trackers - .buffers - .set_single( - &*buffer_guard, - destination.buffer, - hal::BufferUses::COPY_DST, - ) - .ok_or(TransferError::InvalidBuffer(destination.buffer))?; + let src_barrier = src_pending.map(|pending| pending.into_hal(src_inner.as_ref().unwrap())); + + let (dst_buffer, dst_pending) = { + let buffer_guard = hub.buffers.read(); + let dst_buffer = buffer_guard + .get(destination.buffer) + .map_err(|_| TransferError::InvalidBuffer(destination.buffer))?; + tracker + .buffers + .set_single(dst_buffer, hal::BufferUses::COPY_DST) + .ok_or(TransferError::InvalidBuffer(destination.buffer))? + }; let dst_raw = dst_buffer .raw .as_ref() @@ -948,7 +986,7 @@ impl Global { TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(), ); } - let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer)); + let dst_barrier = dst_pending.map(|pending| pending.into_hal(&dst_buffer)); if !src_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -978,14 +1016,11 @@ impl Global { .map_err(TransferError::from)?; } - cmd_buf - .buffer_memory_init_actions - .extend(dst_buffer.initialization_status.create_action( - destination.buffer, - destination.layout.offset - ..(destination.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::ImplicitlyInitialized, - )); + buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( + &dst_buffer, + destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::ImplicitlyInitialized, + )); let regions = (0..array_layer_count).map(|rel_array_layer| { let mut texture_base = src_base.clone(); @@ -998,7 +1033,7 @@ impl Global { size: hal_copy_size, } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = encoder.open(); unsafe { cmd_buf_raw.transition_buffers(dst_barrier.into_iter()); cmd_buf_raw.transition_textures(src_barrier.into_iter()); @@ -1022,27 +1057,29 @@ impl Global { profiling::scope!("CommandEncoder::copy_texture_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); - let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?; - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (texture_guard, _) = hub.textures.read(&mut token); - let device = &device_guard[cmd_buf.device_id.value]; + let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?; + let device = &cmd_buf.device; 
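
Every transfer entry point now begins with `device.is_valid()` and fails early with `InvalidDevice` otherwise. One plausible minimal model of such a validity flag is an atomic boolean flipped on device loss (an assumption for illustration; wgpu's real `Device` tracks much more than this):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct Device {
    valid: AtomicBool,
}

impl Device {
    fn new() -> Self {
        Device { valid: AtomicBool::new(true) }
    }

    fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    /// Called when the device is lost; all later encoder calls fail early.
    fn mark_lost(&self) {
        self.valid.store(false, Ordering::Release);
    }
}

fn main() {
    let device = Device::new();
    assert!(device.is_valid());
    device.mark_lost();
    assert!(!device.is_valid());
}
```
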
if !device.is_valid() { - return Err(TransferError::InvalidDevice(cmd_buf.device_id.value.0).into()); + return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into()); } + let mut cmd_buf_data = cmd_buf.data.lock(); + let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + #[cfg(feature = "trace")] - if let Some(ref mut list) = cmd_buf.commands { + if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToTexture { src: *source, dst: *destination, size: *copy_size, }); } + let encoder = &mut cmd_buf_data.encoder; + let tracker = &mut cmd_buf_data.trackers; + let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; + + let texture_guard = hub.textures.read(); if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); @@ -1052,9 +1089,11 @@ impl Global { let src_texture = texture_guard .get(source.texture) .map_err(|_| TransferError::InvalidTexture(source.texture))?; + let src_inner = src_texture.inner(); let dst_texture = texture_guard .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(source.texture))?; + let dst_inner = dst_texture.inner(); // src and dst texture format must be copy-compatible // https://gpuweb.github.io/gpuweb/#copy-compatible @@ -1092,21 +1131,33 @@ impl Global { // Handle texture init *before* dealing with barrier transitions so we // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. - handle_src_texture_init(cmd_buf, device, source, copy_size, &texture_guard)?; - handle_dst_texture_init(cmd_buf, device, destination, copy_size, &texture_guard)?; + handle_src_texture_init( + encoder, + tracker, + texture_memory_actions, + device, + source, + copy_size, + &texture_guard, + )?; + handle_dst_texture_init( + encoder, + tracker, + texture_memory_actions, + device, + destination, + copy_size, + &texture_guard, + )?; - let src_pending = cmd_buf + let src_pending = cmd_buf_data .trackers .textures - .set_single( - src_texture, - source.texture, - src_range, - hal::TextureUses::COPY_SRC, - ) + .set_single(src_texture, src_range, hal::TextureUses::COPY_SRC) .ok_or(TransferError::InvalidTexture(source.texture))?; - let src_raw = src_texture - .inner + let src_raw = src_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(source.texture))?; if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) { @@ -1116,21 +1167,17 @@ impl Global { //TODO: try to avoid this the collection. It's needed because both // `src_pending` and `dst_pending` try to hold `trackers.textures` mutably. 
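
The TODO above exists because both pending transitions hand back items that borrow the tracker mutably, so the source batch must be collected before the tracker can be borrowed again for the destination. A compilable sketch of why the collection is needed, using `Vec` instead of `ArrayVec` and invented tracker types:

```rust
struct Tracker {
    transitions: u32,
}

struct Barrier(&'static str);

impl Tracker {
    // Returns an iterator whose items are tied to a mutable borrow of `self`,
    // like `set_single` handing back `PendingTransition`s.
    fn set_single(&mut self, label: &'static str) -> impl Iterator<Item = Barrier> + '_ {
        self.transitions += 1;
        std::iter::once(Barrier(label))
    }
}

fn main() {
    let mut tracker = Tracker { transitions: 0 };
    // Collect the source barriers first, releasing the mutable borrow...
    let mut barriers: Vec<Barrier> = tracker.set_single("src: COPY_SRC").collect();
    // ...so the tracker can be borrowed mutably a second time for the target.
    barriers.extend(tracker.set_single("dst: COPY_DST"));
    assert_eq!(barriers.len(), 2);
    assert_eq!(barriers[0].0, "src: COPY_SRC");
    assert_eq!(tracker.transitions, 2);
}
```
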
let mut barriers: ArrayVec<_, 2> = src_pending - .map(|pending| pending.into_hal(src_texture)) + .map(|pending| pending.into_hal(src_inner.as_ref().unwrap())) .collect(); - let dst_pending = cmd_buf + let dst_pending = cmd_buf_data .trackers .textures - .set_single( - dst_texture, - destination.texture, - dst_range, - hal::TextureUses::COPY_DST, - ) + .set_single(dst_texture, dst_range, hal::TextureUses::COPY_DST) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let dst_raw = dst_texture - .inner + let dst_raw = dst_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) { @@ -1139,7 +1186,7 @@ impl Global { ); } - barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture))); + barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap()))); let hal_copy_size = hal::CopyExtent { width: src_copy_size.width.min(dst_copy_size.width), @@ -1157,7 +1204,7 @@ impl Global { size: hal_copy_size, } }); - let cmd_buf_raw = cmd_buf.encoder.open(); + let cmd_buf_raw = cmd_buf_data.encoder.open(); unsafe { cmd_buf_raw.transition_textures(barriers.into_iter()); cmd_buf_raw.copy_texture_to_texture( @@ -1167,6 +1214,7 @@ impl Global { regions, ); } + Ok(()) } } diff --git a/third_party/rust/wgpu-core/src/conv.rs b/third_party/rust/wgpu-core/src/conv.rs index 90629f08d6359..0b67ad3cbeb25 100644 --- a/third_party/rust/wgpu-core/src/conv.rs +++ b/third_party/rust/wgpu-core/src/conv.rs @@ -1,4 +1,6 @@ -use crate::resource; +use wgt::TextureFormatFeatures; + +use crate::resource::{self, TextureDescriptor}; pub fn is_power_of_two_u16(val: u16) -> bool { val != 0 && (val & (val - 1)) == 0 @@ -135,6 +137,32 @@ pub fn map_texture_usage( u } +pub fn map_texture_usage_for_texture( + desc: &TextureDescriptor, + format_features: &TextureFormatFeatures, +) -> hal::TextureUses { + // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we + // wouldn't be able to initialize the texture. + map_texture_usage(desc.usage, desc.format.into()) + | if desc.format.is_depth_stencil_format() { + hal::TextureUses::DEPTH_STENCIL_WRITE + } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { + hal::TextureUses::COPY_DST // (set already) + } else { + // Use COPY_DST only if we can't use COLOR_TARGET + if format_features + .allowed_usages + .contains(wgt::TextureUsages::RENDER_ATTACHMENT) + && desc.dimension == wgt::TextureDimension::D2 + // Render targets dimension must be 2d + { + hal::TextureUses::COLOR_TARGET + } else { + hal::TextureUses::COPY_DST + } + } +} + pub fn map_texture_usage_from_hal(uses: hal::TextureUses) -> wgt::TextureUsages { let mut u = wgt::TextureUsages::empty(); u.set( diff --git a/third_party/rust/wgpu-core/src/device/any_device.rs b/third_party/rust/wgpu-core/src/device/any_device.rs new file mode 100644 index 0000000000000..ab13b1421f67d --- /dev/null +++ b/third_party/rust/wgpu-core/src/device/any_device.rs @@ -0,0 +1,88 @@ +use super::Device; +/// The `AnyDevice` type: a pointer to a `Device` for any backend `A`. +use crate::hal_api::HalApi; + +use std::any::Any; +use std::fmt; +use std::sync::Arc; + +/// A pointer to a `Device`, for any backend `A`. +/// +/// Any `AnyDevice` is just like an `Arc>`, except that the +/// `A` type parameter is erased. To access the `Device`, you must +/// downcast to a particular backend with the \[`downcast_ref`\] or +/// \[`downcast_clone`\] methods. 
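
The new file below erases the backend type parameter behind `Arc<dyn Any>`. When the erased type can be required to be `Send + Sync`, the same idea needs no unsafe code at all, because `std::sync::Arc::downcast` covers it; here is a safe sketch of that simpler variant (not the wasm-tolerant version this patch adds, and with made-up types):

```rust
use std::any::Any;
use std::sync::Arc;

struct AnyArc(Arc<dyn Any + Send + Sync>);

impl AnyArc {
    fn new<T: Send + Sync + 'static>(value: Arc<T>) -> Self {
        AnyArc(value)
    }

    fn downcast_ref<T: 'static>(&self) -> Option<&T> {
        self.0.downcast_ref::<T>()
    }

    /// Safe counterpart of `downcast_clone`: possible only because the
    /// erased type was required to be Send + Sync up front.
    fn downcast_clone<T: Send + Sync + 'static>(&self) -> Option<Arc<T>> {
        self.0.clone().downcast::<T>().ok()
    }
}

fn main() {
    struct Metal {
        queues: u32,
    }
    let erased = AnyArc::new(Arc::new(Metal { queues: 1 }));
    assert_eq!(erased.downcast_ref::<Metal>().map(|m| m.queues), Some(1));
    assert!(erased.downcast_clone::<Metal>().is_some());
}
```
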
+pub struct AnyDevice(Arc<dyn Any + 'static>);
+
+impl AnyDevice {
+    /// Return an `AnyDevice` that holds an owning `Arc` pointer to `device`.
+    pub fn new<A: HalApi>(device: Arc<Device<A>>) -> AnyDevice {
+        AnyDevice(device)
+    }
+
+    /// If `self` is an `Arc<Device<A>>`, return a reference to the
+    /// device.
+    pub fn downcast_ref<A: HalApi>(&self) -> Option<&Device<A>> {
+        self.0.downcast_ref::<Device<A>>()
+    }
+
+    /// If `self` is an `Arc<Device<A>>`, return a clone of that.
+    pub fn downcast_clone<A: HalApi>(&self) -> Option<Arc<Device<A>>> {
+        // `Arc::downcast` returns `Arc<T>`, but requires that `T` be `Sync` and
+        // `Send`, and this is not the case for `Device` in wasm builds.
+        //
+        // But as far as I can see, `Arc::downcast` has no particular reason to
+        // require that `T` be `Sync` and `Send`; the steps used here are sound.
+        if (self.0).is::<Device<A>>() {
+            // Get an owned Arc.
+            let clone = self.0.clone();
+            // Turn the `Arc`, which is a pointer to an `ArcInner` struct, into
+            // a pointer to the `ArcInner`'s `data` field. Carry along the
+            // vtable from the original `Arc`.
+            let raw_erased: *const (dyn Any + 'static) = Arc::into_raw(clone);
+            // Remove the vtable, and supply the concrete type of the `data`.
+            let raw_typed: *const Device<A> = raw_erased.cast::<Device<A>>();
+            // Convert the pointer to the `data` field back into a pointer to
+            // the `ArcInner`, and restore reference-counting behavior.
+            let arc_typed: Arc<Device<A>> = unsafe {
+                // Safety:
+                // - We checked that the `dyn Any` was indeed a `Device<A>` above.
+                // - We're calling `Arc::from_raw` on the same pointer returned
+                //   by `Arc::into_raw`, except that we stripped off the vtable
+                //   pointer.
+                // - The pointer must still be live, because we've borrowed `self`,
+                //   which holds another reference to it.
+                // - The format of an `ArcInner<dyn Any>` must be the same as
+                //   that of an `ArcInner<Device<A>>`, or else `AnyDevice::new`
+                //   wouldn't be possible.
+ Arc::from_raw(raw_typed) + }; + Some(arc_typed) + } else { + None + } + } +} + +impl fmt::Debug for AnyDevice { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AnyDevice") + } +} + +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Send for AnyDevice {} +#[cfg(any( + not(target_arch = "wasm32"), + all( + feature = "fragile-send-sync-non-atomic-wasm", + not(target_feature = "atomics") + ) +))] +unsafe impl Sync for AnyDevice {} diff --git a/third_party/rust/wgpu-core/src/device/global.rs b/third_party/rust/wgpu-core/src/device/global.rs index 1d9f48ead03f2..d7cf7fbceabfd 100644 --- a/third_party/rust/wgpu-core/src/device/global.rs +++ b/third_party/rust/wgpu-core/src/device/global.rs @@ -1,36 +1,32 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{self, BindGroupLayout}, - command, conv, + binding_model, command, conv, device::{ - life::WaitIdleError, map_buffer, queue, Device, DeviceError, DeviceLostClosure, HostMap, + life::WaitIdleError, map_buffer, queue, DeviceError, DeviceLostClosure, HostMap, + IMPLICIT_FAILURE, }, global::Global, hal_api::HalApi, - hub::Token, - id::{self, AdapterId, DeviceId, SurfaceId}, + id::{self, AdapterId, DeviceId, QueueId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, instance::{self, Adapter, Surface}, pipeline, present, - resource::{self, Buffer, BufferAccessResult, BufferMapState}, - resource::{BufferAccessError, BufferMapOperation, TextureClearMode}, - storage::InvalidId, + resource::{self, BufferAccessResult}, + resource::{BufferAccessError, BufferMapOperation, Resource}, validation::check_buffer_usage, - FastHashMap, Label, LabelHelpers as _, Stored, + FastHashMap, Label, LabelHelpers as _, }; -use hal::{CommandEncoder as _, Device as _}; -use smallvec::SmallVec; +use hal::Device as _; +use parking_lot::RwLock; use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, iter, mem, ops::Range, ptr}; +use std::{borrow::Cow, iter, ops::Range, ptr, sync::atomic::Ordering}; -use super::{ - BufferMapPendingClosure, ImplicitPipelineIds, InvalidDevice, UserClosures, IMPLICIT_FAILURE, -}; +use super::{ImplicitPipelineIds, InvalidDevice, UserClosures}; impl Global { pub fn adapter_is_surface_supported( @@ -39,10 +35,9 @@ impl Global { surface_id: SurfaceId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (surface_guard, mut token) = self.surfaces.read(&mut token); - let (adapter_guard, mut _token) = hub.adapters.read(&mut token); + let surface_guard = self.surfaces.read(); + let adapter_guard = hub.adapters.read(); let adapter = adapter_guard .get(adapter_id) .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; @@ -85,10 +80,9 @@ impl Global { get_supported_callback: F, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (surface_guard, mut token) = self.surfaces.read(&mut token); - let (adapter_guard, mut _token) = hub.adapters.read(&mut token); + let surface_guard = self.surfaces.read(); + let adapter_guard = hub.adapters.read(); let adapter = adapter_guard .get(adapter_id) .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; @@ -104,10 +98,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| 
InvalidDevice)?; - if !device.valid { + + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + if !device.is_valid() { return Err(InvalidDevice); } @@ -119,10 +112,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + if !device.is_valid() { return Err(InvalidDevice); } @@ -134,10 +126,9 @@ impl Global { device_id: DeviceId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + if !device.is_valid() { return Err(InvalidDevice); } @@ -153,63 +144,71 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.buffers.prepare(id_in); + let fid = hub.buffers.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, - Err(_) => break DeviceError::Invalid.into(), + Err(_) => { + break DeviceError::Invalid.into(); + } }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } if desc.usage.is_empty() { // Per spec, `usage` must not be zero. - break resource::CreateBufferError::InvalidUsage(desc.usage); + let id = fid.assign_error(desc.label.borrow_or_default()); + return ( + id, + Some(resource::CreateBufferError::InvalidUsage(desc.usage)), + ); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { + if let Some(ref mut trace) = *device.trace.lock() { let mut desc = desc.clone(); - let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false); + let mapped_at_creation = std::mem::replace(&mut desc.mapped_at_creation, false); if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { desc.usage |= wgt::BufferUsages::COPY_DST; } - trace - .lock() - .add(trace::Action::CreateBuffer(fid.id(), desc)); + trace.add(trace::Action::CreateBuffer(fid.id(), desc)); } - let mut buffer = match device.create_buffer(device_id, desc, false) { + let buffer = match device.create_buffer(desc, false) { Ok(buffer) => buffer, - Err(e) => break e, + Err(e) => { + let id = fid.assign_error(desc.label.borrow_or_default()); + return (id, Some(e)); + } }; - let ref_count = buffer.life_guard.add_ref(); + + let (id, resource) = fid.assign(buffer); + log::info!("Created Buffer {:?} with {:?}", id, desc); let buffer_use = if !desc.mapped_at_creation { hal::BufferUses::empty() } else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) { // buffer is mappable, so we are just doing that at start - let map_size = buffer.size; + let map_size = resource.size; let ptr = if map_size == 0 { std::ptr::NonNull::dangling() } else { - match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) { + match map_buffer(device.raw(), &resource, 0, map_size, HostMap::Write) { Ok(ptr) => ptr, Err(e) => { - let raw = buffer.raw.unwrap(); - device.lock_life(&mut token).schedule_resource_destruction( - queue::TempResource::Buffer(raw), + device.lock_life().schedule_resource_destruction( + 
queue::TempResource::Buffer(resource), !0, ); - break e.into(); + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(e.into())); } } }; - buffer.map_state = resource::BufferMapState::Active { + *resource.map_state.lock() = resource::BufferMapState::Active { ptr, range: 0..map_size, host: HostMap::Write, @@ -225,60 +224,65 @@ impl Global { usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC, mapped_at_creation: false, }; - let mut stage = match device.create_buffer(device_id, &stage_desc, true) { + let stage = match device.create_buffer(&stage_desc, true) { Ok(stage) => stage, Err(e) => { - let raw = buffer.raw.unwrap(); - device - .lock_life(&mut token) - .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); - break e; + device.lock_life().schedule_resource_destruction( + queue::TempResource::Buffer(resource), + !0, + ); + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(e)); } }; - let stage_buffer = stage.raw.unwrap(); - let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } { + let stage_fid = hub.buffers.request(); + let stage = stage_fid.init(stage); + + let mapping = match unsafe { device.raw().map_buffer(stage.raw(), 0..stage.size) } { Ok(mapping) => mapping, Err(e) => { - let raw = buffer.raw.unwrap(); - let mut life_lock = device.lock_life(&mut token); - life_lock - .schedule_resource_destruction(queue::TempResource::Buffer(raw), !0); + let mut life_lock = device.lock_life(); life_lock.schedule_resource_destruction( - queue::TempResource::Buffer(stage_buffer), + queue::TempResource::Buffer(resource), !0, ); - break DeviceError::from(e).into(); + life_lock + .schedule_resource_destruction(queue::TempResource::Buffer(stage), !0); + hub.buffers + .force_replace_with_error(id, desc.label.borrow_or_default()); + return (id, Some(DeviceError::from(e).into())); } }; - assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0); + assert_eq!(resource.size % wgt::COPY_BUFFER_ALIGNMENT, 0); // Zero initialize memory and then mark both staging and buffer as initialized // (it's guaranteed that this is the case by the time the buffer is usable) - unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) }; - buffer.initialization_status.drain(0..buffer.size); - stage.initialization_status.drain(0..buffer.size); - - buffer.map_state = resource::BufferMapState::Init { + unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, resource.size as usize) }; + resource + .initialization_status + .write() + .drain(0..resource.size); + stage.initialization_status.write().drain(0..resource.size); + + *resource.map_state.lock() = resource::BufferMapState::Init { ptr: mapping.ptr, needs_flush: !mapping.is_coherent, - stage_buffer, + stage_buffer: stage, }; hal::BufferUses::COPY_DST }; - let id = fid.assign(buffer, &mut token); - log::trace!("Device::create_buffer -> {:?}", id.0); - device .trackers .lock() .buffers - .insert_single(id, ref_count, buffer_use); + .insert_single(id, resource, buffer_use); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -312,10 +316,9 @@ impl Global { /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages pub fn create_buffer_error(&self, id_in: Input, label: Label) { let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.buffers.prepare(id_in); + let fid 
= hub.buffers.prepare::(id_in); - fid.assign_error(label.borrow_or_default(), &mut token); + fid.assign_error(label.borrow_or_default()); } pub fn create_render_bundle_error( @@ -324,11 +327,9 @@ impl Global { label: Label, ) { let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.render_bundles.prepare(id_in); + let fid = hub.render_bundles.prepare::(id_in); - let (_, mut token) = hub.devices.read(&mut token); - fid.assign_error(label.borrow_or_default(), &mut token); + fid.assign_error(label.borrow_or_default()); } /// Assign `id_in` an error with the given `label`. @@ -336,10 +337,9 @@ impl Global { /// See `create_buffer_error` for more context and explaination. pub fn create_texture_error(&self, id_in: Input, label: Label) { let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); + let fid = hub.textures.prepare::(id_in); - fid.assign_error(label.borrow_or_default(), &mut token); + fid.assign_error(label.borrow_or_default()); } #[cfg(feature = "replay")] @@ -349,20 +349,19 @@ impl Global { buffer_id: id::BufferId, ) -> Result<(), WaitIdleError> { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); + let last_submission = { - let (buffer_guard, _) = hub.buffers.write(&mut token); + let buffer_guard = hub.buffers.write(); match buffer_guard.get(buffer_id) { - Ok(buffer) => buffer.life_guard.life_count(), + Ok(buffer) => buffer.info.submission_index(), Err(_) => return Ok(()), } }; - device_guard + hub.devices .get(device_id) .map_err(|_| DeviceError::Invalid)? - .wait_for_submit(last_submission, &mut token) + .wait_for_submit(last_submission) } #[doc(hidden)] @@ -376,25 +375,24 @@ impl Global { profiling::scope!("Device::set_buffer_sub_data"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let device = device_guard + let device = hub + .devices .get(device_id) .map_err(|_| DeviceError::Invalid)?; - if !device.valid { + if !device.is_valid() { return Err(DeviceError::Lost.into()); } - let buffer = buffer_guard - .get_mut(buffer_id) + + let buffer = hub + .buffers + .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?; //assert!(buffer isn't used by the GPU); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data_path = trace.make_binary("bin", data); trace.add(trace::Action::WriteBuffer { id: buffer_id, @@ -404,20 +402,20 @@ impl Global { }); } - let raw_buf = buffer.raw.as_ref().unwrap(); + let raw_buf = buffer.raw(); unsafe { let mapping = device - .raw + .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) .map_err(DeviceError::from)?; ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); if !mapping.is_coherent { device - .raw + .raw() .flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64)); } device - .raw + .raw() .unmap_buffer(raw_buf) .map_err(DeviceError::from)?; } @@ -436,37 +434,37 @@ impl Global { profiling::scope!("Device::get_buffer_sub_data"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let device = device_guard + let device = hub + .devices 
.get(device_id) .map_err(|_| DeviceError::Invalid)?; - if !device.valid { + if !device.is_valid() { return Err(DeviceError::Lost.into()); } - let buffer = buffer_guard - .get_mut(buffer_id) + + let buffer = hub + .buffers + .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?; //assert!(buffer isn't used by the GPU); - let raw_buf = buffer.raw.as_ref().unwrap(); + let raw_buf = buffer.raw(); unsafe { let mapping = device - .raw + .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) .map_err(DeviceError::from)?; if !mapping.is_coherent { - device.raw.invalidate_mapped_ranges( + device.raw().invalidate_mapped_ranges( raw_buf, iter::once(offset..offset + data.len() as u64), ); } ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len()); device - .raw + .raw() .unmap_buffer(raw_buf) .map_err(DeviceError::from)?; } @@ -484,108 +482,53 @@ impl Global { ) -> Result<(), resource::DestroyError> { profiling::scope!("Buffer::destroy"); - let map_closure; - // Restrict the locks to this scope. - { - let hub = A::hub(self); - let mut token = Token::root(); - - //TODO: lock pending writes separately, keep the device read-only - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - log::trace!("Buffer::destroy {buffer_id:?}"); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_and_mark_destroyed(buffer_id) - .map_err(|_| resource::DestroyError::Invalid)?; - - let device = &mut device_guard[buffer.device_id.value]; - - map_closure = match &buffer.map_state { - &BufferMapState::Waiting(..) // To get the proper callback behavior. - | &BufferMapState::Init { .. } - | &BufferMapState::Active { .. } - => { - self.buffer_unmap_inner(buffer_id, buffer, device) - .unwrap_or(None) - } - _ => None, - }; - - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeBuffer(buffer_id)); - } - - let raw = buffer - .raw - .take() - .ok_or(resource::DestroyError::AlreadyDestroyed)?; - let temp = queue::TempResource::Buffer(raw); - - if device.pending_writes.dst_buffers.contains(&buffer_id) { - device.pending_writes.temp_resources.push(temp); - } else { - let last_submit_index = buffer.life_guard.life_count(); - drop(buffer_guard); - device - .lock_life(&mut token) - .schedule_resource_destruction(temp, last_submit_index); - } - } - - // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = map_closure { - operation.callback.call(status); - } + let hub = A::hub(self); - Ok(()) + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); + let mut buffer_guard = hub.buffers.write(); + let buffer = buffer_guard + .get_and_mark_destroyed(buffer_id) + .map_err(|_| resource::DestroyError::Invalid)?; + buffer.destroy() } pub fn buffer_drop(&self, buffer_id: id::BufferId, wait: bool) { profiling::scope!("Buffer::drop"); - log::trace!("Buffer::drop {buffer_id:?}"); + + log::debug!("Buffer {:?} is asked to be dropped", buffer_id); let hub = A::hub(self); - let mut token = Token::root(); - - let (ref_count, last_submit_index, device_id) = { - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - match buffer_guard.get_occupied_or_destroyed_mut(buffer_id) { - Ok(buffer) => { - let ref_count = buffer.life_guard.ref_count.take().unwrap(); - let last_submit_index = buffer.life_guard.life_count(); - (ref_count, last_submit_index, buffer.device_id.value) - } - 
Err(InvalidId) => { - hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard); - return; - } + + if let Some(buffer) = hub.buffers.unregister(buffer_id) { + if buffer.ref_count() == 1 { + buffer.destroy().ok(); } - }; - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - { - let mut life_lock = device.lock_life(&mut token); - if device.pending_writes.dst_buffers.contains(&buffer_id) { - life_lock.future_suspected_buffers.push(Stored { - value: id::Valid(buffer_id), - ref_count, - }); + let last_submit_index = buffer.info.submission_index(); + + let device = buffer.device.clone(); + + if device + .pending_writes + .lock() + .as_ref() + .unwrap() + .dst_buffers + .contains_key(&buffer_id) + { + device.lock_life().future_suspected_buffers.push(buffer); } else { - drop(ref_count); - life_lock + device + .lock_life() .suspected_resources - .buffers - .push(id::Valid(buffer_id)); + .insert(buffer_id, buffer); } - } - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e), + } } } } @@ -599,46 +542,40 @@ impl Global { profiling::scope!("Device::create_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); + let fid = hub.textures.prepare::(id_in); + let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTexture(fid.id(), desc.clone())); } - let adapter = &adapter_guard[device.adapter_id.value]; - let texture = match device.create_texture(device_id, adapter, desc) { + let texture = match device.create_texture(&device.adapter, desc) { Ok(texture) => texture, Err(error) => break error, }; - let ref_count = texture.life_guard.add_ref(); - let id = fid.assign(texture, &mut token); - log::trace!("Device::create_texture -> {:?}", id.0); + let (id, resource) = fid.assign(texture); + log::info!("Created Texture {:?} with {:?}", id, desc); device.trackers.lock().textures.insert_single( - id.0, - ref_count, + id, + resource, hal::TextureUses::UNINITIALIZED, ); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -657,33 +594,27 @@ impl Global { profiling::scope!("Device::create_texture_from_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(id_in); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); + let fid = hub.textures.prepare::(id_in); + let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) 
=> break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } // NB: Any change done through the raw texture handle will not be // recorded in the replay #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateTexture(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTexture(fid.id(), desc.clone())); } - let adapter = &adapter_guard[device.adapter_id.value]; - let format_features = match device - .describe_format_features(adapter, desc.format) + .describe_format_features(&device.adapter, desc.format) .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error)) { Ok(features) => features, @@ -693,32 +624,30 @@ impl Global { let mut texture = device.create_texture_from_hal( hal_texture, conv::map_texture_usage(desc.usage, desc.format.into()), - device_id, desc, format_features, - TextureClearMode::None, + resource::TextureClearMode::None, ); if desc.usage.contains(wgt::TextureUsages::COPY_DST) { texture.hal_usage |= hal::TextureUses::COPY_DST; } - texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0); - - let ref_count = texture.life_guard.add_ref(); + texture.initialization_status = + RwLock::new(TextureInitTracker::new(desc.mip_level_count, 0)); - let id = fid.assign(texture, &mut token); - log::trace!("Device::create_texture -> {:?}", id.0); + let (id, resource) = fid.assign(texture); + log::info!("Created Texture {:?} with {:?}", id, desc); device.trackers.lock().textures.insert_single( - id.0, - ref_count, + id, + resource, hal::TextureUses::UNINITIALIZED, ); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -737,48 +666,40 @@ impl Global { profiling::scope!("Device::create_buffer"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.buffers.prepare(id_in); + let fid = hub.buffers.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { + let device_guard = hub.devices.read(); let device = match device_guard.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } // NB: Any change done through the raw buffer handle will not be // recorded in the replay #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBuffer(fid.id(), desc.clone())); + if let Some(trace) = device.trace.lock().as_mut() { + trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); } - let mut buffer = device.create_buffer_from_hal(hal_buffer, device_id, desc); - - // Assume external buffers are initialized - buffer.initialization_status = crate::init_tracker::BufferInitTracker::new(0); - - let ref_count = buffer.life_guard.add_ref(); + let buffer = device.create_buffer_from_hal(hal_buffer, desc); - let id = fid.assign(buffer, &mut token); - log::trace!("Device::create_buffer -> {:?}", id.0); + let (id, buffer) = fid.assign(buffer); + log::info!("Created buffer {:?} with {:?}", id, desc); device .trackers .lock() .buffers - .insert_single(id, ref_count, hal::BufferUses::empty()); + .insert_single(id, buffer, hal::BufferUses::empty()); - return (id.0, None); + return (id, None); }; - let id = 
fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -794,52 +715,37 @@ impl Global { log::trace!("Texture::destroy {texture_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - //TODO: lock pending writes separately, keep the device read-only - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - let (mut texture_guard, _) = hub.textures.write(&mut token); + log::debug!("Texture {:?} is destroyed", texture_id); + let mut texture_guard = hub.textures.write(); let texture = texture_guard .get_and_mark_destroyed(texture_id) .map_err(|_| resource::DestroyError::Invalid)?; - let device = &mut device_guard[texture.device_id.value]; + let device = &texture.device; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::FreeTexture(texture_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::FreeTexture(texture_id)); } - let last_submit_index = texture.life_guard.life_count(); - - let clear_views = match std::mem::replace(&mut texture.clear_mode, TextureClearMode::None) { - TextureClearMode::BufferCopy => SmallVec::new(), - TextureClearMode::RenderPass { clear_views, .. } => clear_views, - TextureClearMode::None => SmallVec::new(), - }; + let last_submit_index = texture.info.submission_index(); - match texture.inner { - resource::TextureInner::Native { ref mut raw } => { - let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?; - let temp = queue::TempResource::Texture(raw, clear_views); - - if device.pending_writes.dst_textures.contains(&texture_id) { - device.pending_writes.temp_resources.push(temp); + if let resource::TextureInner::Native { ref raw } = *texture.inner().as_ref().unwrap() { + if !raw.is_none() { + let temp = queue::TempResource::Texture(texture.clone()); + let mut guard = device.pending_writes.lock(); + let pending_writes = guard.as_mut().unwrap(); + if pending_writes.dst_textures.contains_key(&texture_id) { + pending_writes.temp_resources.push(temp); } else { - drop(texture_guard); + drop(guard); device - .lock_life(&mut token) + .lock_life() .schedule_resource_destruction(temp, last_submit_index); } - } - resource::TextureInner::Surface { .. } => { - for clear_view in clear_views { - unsafe { - device.raw.destroy_texture_view(clear_view); - } - } - // TODO? 
+ } else { + return Err(resource::DestroyError::AlreadyDestroyed); } } @@ -848,53 +754,46 @@ impl Global { pub fn texture_drop(&self, texture_id: id::TextureId, wait: bool) { profiling::scope!("Texture::drop"); - log::trace!("Texture::drop {texture_id:?}"); + + log::debug!("Texture {:?} is asked to be dropped", texture_id); let hub = A::hub(self); - let mut token = Token::root(); - - let (ref_count, last_submit_index, device_id) = { - let (mut texture_guard, _) = hub.textures.write(&mut token); - match texture_guard.get_occupied_or_destroyed_mut(texture_id) { - Ok(texture) => { - let ref_count = texture.life_guard.ref_count.take().unwrap(); - let last_submit_index = texture.life_guard.life_count(); - (ref_count, last_submit_index, texture.device_id.value) - } - Err(InvalidId) => { - hub.textures - .unregister_locked(texture_id, &mut *texture_guard); - return; - } - } - }; - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - { - let mut life_lock = device.lock_life(&mut token); - if device.pending_writes.dst_textures.contains(&texture_id) { - life_lock.future_suspected_textures.push(Stored { - value: id::Valid(texture_id), - ref_count, - }); - } else { - drop(ref_count); - life_lock - .suspected_resources - .textures - .push(id::Valid(texture_id)); + if let Some(texture) = hub.textures.unregister(texture_id) { + let last_submit_index = texture.info.submission_index(); + + let device = &texture.device; + { + if device + .pending_writes + .lock() + .as_ref() + .unwrap() + .dst_textures + .contains_key(&texture_id) + { + device + .lock_life() + .future_suspected_textures + .push(texture.clone()); + } else { + device + .lock_life() + .suspected_resources + .insert(texture_id, texture.clone()); + } } - } - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + if wait { + match device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e), + } } } } + #[allow(unused_unsafe)] pub fn texture_create_view( &self, texture_id: id::TextureId, @@ -904,43 +803,37 @@ impl Global { profiling::scope!("Texture::create_view"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.texture_views.prepare(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); + let fid = hub.texture_views.prepare::(id_in); + let error = loop { - let texture = match texture_guard.get(texture_id) { + let texture = match hub.textures.get(texture_id) { Ok(texture) => texture, Err(_) => break resource::CreateTextureViewError::InvalidTexture, }; - let device = &device_guard[texture.device_id.value]; + let device = &texture.device; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateTextureView { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateTextureView { id: fid.id(), parent_id: texture_id, desc: desc.clone(), }); } - let view = match device.create_texture_view(texture, texture_id, desc) { + let view = match unsafe { device.create_texture_view(&texture, desc) } { Ok(view) => view, Err(e) => break e, }; - let ref_count = view.life_guard.add_ref(); - let id = fid.assign(view, &mut token); - device.trackers.lock().views.insert_single(id, ref_count); - - log::trace!("Texture::create_view {:?} -> 
{:?}", texture_id, id.0); - - return (id.0, None); + let (id, resource) = fid.assign(view); + log::info!("Created TextureView {:?}", id); + device.trackers.lock().views.insert_single(id, resource); + return (id, None); }; log::error!("Texture::create_view {:?} error {:?}", texture_id, error); - - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -954,44 +847,28 @@ impl Global { wait: bool, ) -> Result<(), resource::TextureViewDestroyError> { profiling::scope!("TextureView::drop"); - log::trace!("TextureView::drop {:?}", texture_view_id); + + log::debug!("TextureView {:?} is asked to be dropped", texture_view_id); let hub = A::hub(self); - let mut token = Token::root(); - let (last_submit_index, device_id) = { - let (mut texture_view_guard, _) = hub.texture_views.write(&mut token); + if let Some(view) = hub.texture_views.unregister(texture_view_id) { + let last_submit_index = view.info.submission_index(); - match texture_view_guard.get_mut(texture_view_id) { - Ok(view) => { - let _ref_count = view.life_guard.ref_count.take(); - let last_submit_index = view.life_guard.life_count(); - (last_submit_index, view.device_id.value) - } - Err(InvalidId) => { - hub.texture_views - .unregister_locked(texture_view_id, &mut *texture_view_guard); - return Ok(()); - } - } - }; + view.device + .lock_life() + .suspected_resources + .insert(texture_view_id, view.clone()); - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; - device - .lock_life(&mut token) - .suspected_resources - .texture_views - .push(id::Valid(texture_view_id)); - - if wait { - match device.wait_for_submit(last_submit_index, &mut token) { - Ok(()) => (), - Err(e) => log::error!( - "Failed to wait for texture view {:?}: {:?}", - texture_view_id, - e - ), + if wait { + match view.device.wait_for_submit(last_submit_index) { + Ok(()) => (), + Err(e) => log::error!( + "Failed to wait for texture view {:?}: {:?}", + texture_view_id, + e + ), + } } } Ok(()) @@ -1006,41 +883,35 @@ impl Global { profiling::scope!("Device::create_sampler"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.samplers.prepare(id_in); + let fid = hub.samplers.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateSampler(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateSampler(fid.id(), desc.clone())); } - let sampler = match device.create_sampler(device_id, desc) { + let sampler = match device.create_sampler(desc) { Ok(sampler) => sampler, Err(e) => break e, }; - let ref_count = sampler.life_guard.add_ref(); - let id = fid.assign(sampler, &mut token); - - device.trackers.lock().samplers.insert_single(id, ref_count); - log::trace!("Device::create_sampler -> {:?}", id.0); + let (id, resource) = fid.assign(sampler); + log::info!("Created Sampler {:?}", id); + device.trackers.lock().samplers.insert_single(id, resource); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), 
&mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1050,32 +921,17 @@ impl Global { pub fn sampler_drop(&self, sampler_id: id::SamplerId) { profiling::scope!("Sampler::drop"); - log::trace!("Sampler::drop {sampler_id:?}"); + log::debug!("Sampler {:?} is asked to be dropped", sampler_id); let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut sampler_guard, _) = hub.samplers.write(&mut token); - match sampler_guard.get_mut(sampler_id) { - Ok(sampler) => { - sampler.life_guard.ref_count.take(); - sampler.device_id.value - } - Err(InvalidId) => { - hub.samplers - .unregister_locked(sampler_id, &mut *sampler_guard); - return; - } - } - }; - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .samplers - .push(id::Valid(sampler_id)); + if let Some(sampler) = hub.samplers.unregister(sampler_id) { + sampler + .device + .lock_life() + .suspected_resources + .insert(sampler_id, sampler.clone()); + } } pub fn device_create_bind_group_layout( @@ -1089,25 +945,21 @@ impl Global { ) { profiling::scope!("Device::create_bind_group_layout"); - let mut token = Token::root(); let hub = A::hub(self); - let fid = hub.bind_group_layouts.prepare(id_in); + let fid = hub.bind_group_layouts.prepare::(id_in); let error = 'outer: loop { - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone())); } let mut entry_map = FastHashMap::default(); @@ -1125,57 +977,27 @@ impl Global { } } - let mut compatible_layout = None; - let layout = { - let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); - if let Some(id) = - Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard) - { - // If there is an equivalent BGL, just bump the refcount and return it. - // This is only applicable if ids are generated in wgpu. In practice: - // - wgpu users take this branch and return the existing - // id without using the indirection layer in BindGroupLayout. - // - Other users like gecko or the replay tool use don't take - // the branch and instead rely on the indirection to use the - // proper bind group layout id. 
- if G::ids_are_generated_in_wgpu() { - log::trace!("Device::create_bind_group_layout (duplicate of {id:?})"); - return (id, None); - } - - compatible_layout = Some(id::Valid(id)); - } + if let Some((id, layout)) = { + let bgl_guard = hub.bind_group_layouts.read(); + device.deduplicate_bind_group_layout(&entry_map, &*bgl_guard) + } { + log::info!("Reusing BindGroupLayout {layout:?} -> {:?}", id); + let id = fid.assign_existing(&layout); + return (id, None); + } - if let Some(original_id) = compatible_layout { - let original = &bgl_guard[original_id]; - BindGroupLayout { - device_id: original.device_id.clone(), - inner: crate::binding_model::BglOrDuplicate::Duplicate(original_id), - multi_ref_count: crate::MultiRefCount::new(), - } - } else { - match device.create_bind_group_layout(device_id, &desc.label, entry_map) { - Ok(layout) => layout, - Err(e) => break e, - } - } + let layout = match device.create_bind_group_layout(&desc.label, entry_map) { + Ok(layout) => layout, + Err(e) => break e, }; - let id = fid.assign(layout, &mut token); - - if let Some(dupe) = compatible_layout { - log::trace!( - "Device::create_bind_group_layout (duplicate of {dupe:?}) -> {:?}", - id.0 - ); - } else { - log::trace!("Device::create_bind_group_layout -> {:?}", id.0); - } - - return (id.0, None); + let (id, _layout) = fid.assign(layout); + log::info!("Created BindGroupLayout {:?}", id); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let fid = hub.bind_group_layouts.prepare::(id_in); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1185,28 +1007,21 @@ impl Global { pub fn bind_group_layout_drop(&self, bind_group_layout_id: id::BindGroupLayoutId) { profiling::scope!("BindGroupLayout::drop"); - log::trace!("BindGroupLayout::drop {:?}", bind_group_layout_id); + + log::debug!( + "BindGroupLayout {:?} is asked to be dropped", + bind_group_layout_id + ); let hub = A::hub(self); - let mut token = Token::root(); - let device_id = { - let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token); - match bind_group_layout_guard.get_mut(bind_group_layout_id) { - Ok(layout) => layout.device_id.value, - Err(InvalidId) => { - hub.bind_group_layouts - .unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard); - return; - } - } - }; - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .bind_group_layouts - .push(id::Valid(bind_group_layout_id)); + if let Some(layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) { + layout + .device + .lock_life() + .suspected_resources + .insert(bind_group_layout_id, layout.clone()); + } } pub fn device_create_pipeline_layout( @@ -1221,42 +1036,36 @@ impl Global { profiling::scope!("Device::create_pipeline_layout"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.pipeline_layouts.prepare(id_in); + let fid = hub.pipeline_layouts.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); + if let Some(ref mut 
trace) = *device.trace.lock() { + trace.add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone())); } let layout = { - let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token); - match device.create_pipeline_layout(device_id, desc, &*bgl_guard) { + let bgl_guard = hub.bind_group_layouts.read(); + match device.create_pipeline_layout(desc, &*bgl_guard) { Ok(layout) => layout, Err(e) => break e, } }; - let id = fid.assign(layout, &mut token); - - log::trace!("Device::create_pipeline_layout -> {:?}", id.0); - - return (id.0, None); + let (id, _) = fid.assign(layout); + log::info!("Created PipelineLayout {:?}", id); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1266,34 +1075,20 @@ impl Global { pub fn pipeline_layout_drop(&self, pipeline_layout_id: id::PipelineLayoutId) { profiling::scope!("PipelineLayout::drop"); - log::trace!("PipelineLayout::drop {:?}", pipeline_layout_id); - let hub = A::hub(self); - let mut token = Token::root(); - let (device_id, ref_count) = { - let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token); - match pipeline_layout_guard.get_mut(pipeline_layout_id) { - Ok(layout) => ( - layout.device_id.value, - layout.life_guard.ref_count.take().unwrap(), - ), - Err(InvalidId) => { - hub.pipeline_layouts - .unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard); - return; - } - } - }; + log::debug!( + "PipelineLayout {:?} is asked to be dropped", + pipeline_layout_id + ); - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .pipeline_layouts - .push(Stored { - value: id::Valid(pipeline_layout_id), - ref_count, - }); + let hub = A::hub(self); + if let Some(layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) { + layout + .device + .lock_life() + .suspected_resources + .insert(pipeline_layout_id, layout.clone()); + } } pub fn device_create_bind_group( @@ -1305,69 +1100,49 @@ impl Global { profiling::scope!("Device::create_bind_group"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.bind_groups.prepare(id_in); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token); + let fid = hub.bind_groups.prepare::(id_in); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } - let mut bind_group_layout = match bind_group_layout_guard.get(desc.layout) { + let bind_group_layout_guard = hub.bind_group_layouts.read(); + let bind_group_layout = match bind_group_layout_guard.get(desc.layout) { Ok(layout) => layout, Err(..) 
=> break binding_model::CreateBindGroupError::InvalidLayout, }; - if bind_group_layout.device_id.value.0 != device_id { + if bind_group_layout.device.as_info().id() != device.as_info().id() { break DeviceError::WrongDevice.into(); } - let mut layout_id = id::Valid(desc.layout); - if let Some(id) = bind_group_layout.as_duplicate() { - layout_id = id; - bind_group_layout = &bind_group_layout_guard[id]; - } - - let bind_group = match device.create_bind_group( - device_id, - bind_group_layout, - layout_id, - desc, - hub, - &mut token, - ) { + let bind_group = match device.create_bind_group(bind_group_layout, desc, hub) { Ok(bind_group) => bind_group, Err(e) => break e, }; - let ref_count = bind_group.life_guard.add_ref(); - let id = fid.assign(bind_group, &mut token); - - log::trace!("Device::create_bind_group -> {:?}", id.0); + let (id, resource) = fid.assign(bind_group); + log::info!("Created BindGroup {:?}", id,); device .trackers .lock() .bind_groups - .insert_single(id, ref_count); - return (id.0, None); + .insert_single(id, resource); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1377,32 +1152,18 @@ impl Global { pub fn bind_group_drop(&self, bind_group_id: id::BindGroupId) { profiling::scope!("BindGroup::drop"); - log::trace!("BindGroup::drop {:?}", bind_group_id); + + log::debug!("BindGroup {:?} is asked to be dropped", bind_group_id); let hub = A::hub(self); - let mut token = Token::root(); - - let device_id = { - let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token); - match bind_group_guard.get_mut(bind_group_id) { - Ok(bind_group) => { - bind_group.life_guard.ref_count.take(); - bind_group.device_id.value - } - Err(InvalidId) => { - hub.bind_groups - .unregister_locked(bind_group_id, &mut *bind_group_guard); - return; - } - } - }; - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .bind_groups - .push(id::Valid(bind_group_id)); + if let Some(bind_group) = hub.bind_groups.unregister(bind_group_id) { + bind_group + .device + .lock_life() + .suspected_resources + .insert(bind_group_id, bind_group.clone()); + } } pub fn device_create_shader_module( @@ -1418,22 +1179,19 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.shader_modules.prepare(id_in); + let fid = hub.shader_modules.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data = match source { #[cfg(feature = "wgsl")] pipeline::ShaderModuleSource::Wgsl(ref code) => { @@ -1456,18 +1214,17 @@ impl Global { }); }; - let shader = match device.create_shader_module(device_id, desc, source) { + let shader = match device.create_shader_module(desc, source) { Ok(shader) => shader, Err(e) => break e, }; - let id = fid.assign(shader, &mut token); - - log::trace!("Device::create_shader_module -> {:?}", id.0); - return (id.0, None); + let (id, _) = fid.assign(shader); + 
log::info!("Created ShaderModule {:?} with {:?}", id, desc); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1490,22 +1247,19 @@ impl Global { profiling::scope!("Device::create_shader_module"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.shader_modules.prepare(id_in); + let fid = hub.shader_modules.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data = trace.make_binary("spv", unsafe { std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4) }); @@ -1516,16 +1270,16 @@ impl Global { }); }; - let shader = - match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } { - Ok(shader) => shader, - Err(e) => break e, - }; - let id = fid.assign(shader, &mut token); - return (id.0, None); + let shader = match unsafe { device.create_shader_module_spirv(desc, &source) } { + Ok(shader) => shader, + Err(e) => break e, + }; + let (id, _) = fid.assign(shader); + log::info!("Created ShaderModule {:?} with {:?}", id, desc); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1535,24 +1289,11 @@ impl Global { pub fn shader_module_drop(&self, shader_module_id: id::ShaderModuleId) { profiling::scope!("ShaderModule::drop"); - log::trace!("ShaderModule::drop {:?}", shader_module_id); + + log::debug!("ShaderModule {:?} is asked to be dropped", shader_module_id); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token); - if let Some(module) = module { - let device = &device_guard[module.device_id.value]; - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyShaderModule(shader_module_id)); - } - unsafe { - device.raw.destroy_shader_module(module.raw); - } - } + hub.shader_modules.unregister(shader_module_id); } pub fn device_create_command_encoder( @@ -1564,52 +1305,46 @@ impl Global { profiling::scope!("Device::create_command_encoder"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.command_buffers.prepare(id_in); + let fid = hub.command_buffers.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid, }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost; } - - let dev_stored = Stored { - value: id::Valid(device_id), - ref_count: device.life_guard.add_ref(), + let queue = match hub.queues.get(device.queue_id.read().unwrap()) { + Ok(queue) => queue, + Err(_) => break DeviceError::InvalidQueueId, }; let encoder = match device .command_allocator .lock() - .acquire_encoder(&device.raw, 
&device.queue) + .as_mut() + .unwrap() + .acquire_encoder(device.raw(), queue.raw.as_ref().unwrap()) { Ok(raw) => raw, Err(_) => break DeviceError::OutOfMemory, }; let command_buffer = command::CommandBuffer::new( encoder, - dev_stored, - device.limits.clone(), - device.downlevel.clone(), - device.features, + &device, #[cfg(feature = "trace")] - device.trace.is_some(), + device.trace.lock().is_some(), desc.label .to_hal(device.instance_flags) .map(|s| s.to_string()), ); - let id = fid.assign(command_buffer, &mut token); - - log::trace!("Device::create_command_encoder -> {:?}", id.0); - - return (id.0, None); + let (id, _) = fid.assign(command_buffer); + log::info!("Created CommandBuffer {:?} with {:?}", id, desc); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1619,25 +1354,28 @@ impl Global { pub fn command_encoder_drop(&self, command_encoder_id: id::CommandEncoderId) { profiling::scope!("CommandEncoder::drop"); - log::trace!("CommandEncoder::drop {:?}", command_encoder_id); + + log::debug!( + "CommandEncoder {:?} is asked to be dropped", + command_encoder_id + ); let hub = A::hub(self); - let mut token = Token::root(); - - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let (cmdbuf, _) = hub - .command_buffers - .unregister(command_encoder_id, &mut token); - if let Some(cmdbuf) = cmdbuf { - let device = &mut device_guard[cmdbuf.device_id.value]; - device.untrack::(hub, &cmdbuf.trackers, &mut token); - device.destroy_command_buffer(cmdbuf); + + if let Some(cmd_buf) = hub.command_buffers.unregister(command_encoder_id) { + cmd_buf + .device + .untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers); } } pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { profiling::scope!("CommandBuffer::drop"); - log::trace!("CommandBuffer::drop {:?}", command_buffer_id); + + log::debug!( + "CommandBuffer {:?} is asked to be dropped", + command_buffer_id + ); self.command_encoder_drop::(command_buffer_id) } @@ -1667,22 +1405,21 @@ impl Global { profiling::scope!("RenderBundleEncoder::finish"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.render_bundles.prepare(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); + let fid = hub.render_bundles.prepare::(id_in); + let error = loop { - let device = match device_guard.get(bundle_encoder.parent()) { + let device = match hub.devices.get(bundle_encoder.parent()) { Ok(device) => device, Err(_) => break command::RenderBundleError::INVALID_DEVICE, }; - if !device.valid { + if !device.is_valid() { break command::RenderBundleError::INVALID_DEVICE; } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderBundle { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateRenderBundle { id: fid.id(), desc: trace::new_render_bundle_encoder_descriptor( desc.label.clone(), @@ -1694,23 +1431,18 @@ impl Global { }); } - let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) { + let render_bundle = match bundle_encoder.finish(desc, &device, hub) { Ok(bundle) => bundle, Err(e) => break e, }; - log::debug!("Render bundle"); - let ref_count = render_bundle.life_guard.add_ref(); - let id = fid.assign(render_bundle, &mut token); - - device.trackers.lock().bundles.insert_single(id, ref_count); - - log::trace!("RenderBundleEncoder::finish -> {:?}", 
id.0); - - return (id.0, None); + let (id, resource) = fid.assign(render_bundle); + log::info!("Created RenderBundle {:?}", id); + device.trackers.lock().bundles.insert_single(id, resource); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); (id, Some(error)) } @@ -1720,31 +1452,18 @@ impl Global { pub fn render_bundle_drop(&self, render_bundle_id: id::RenderBundleId) { profiling::scope!("RenderBundle::drop"); - log::trace!("RenderBundle::drop {:?}", render_bundle_id); + + log::debug!("RenderBundle {:?} is asked to be dropped", render_bundle_id); + let hub = A::hub(self); - let mut token = Token::root(); - - let (device_guard, mut token) = hub.devices.read(&mut token); - let device_id = { - let (mut bundle_guard, _) = hub.render_bundles.write(&mut token); - match bundle_guard.get_mut(render_bundle_id) { - Ok(bundle) => { - bundle.life_guard.ref_count.take(); - bundle.device_id.value - } - Err(InvalidId) => { - hub.render_bundles - .unregister_locked(render_bundle_id, &mut *bundle_guard); - return; - } - } - }; - device_guard[device_id] - .lock_life(&mut token) - .suspected_resources - .render_bundles - .push(id::Valid(render_bundle_id)); + if let Some(bundle) = hub.render_bundles.unregister(render_bundle_id) { + bundle + .device + .lock_life() + .suspected_resources + .insert(render_bundle_id, bundle.clone()); + } } pub fn device_create_query_set( @@ -1756,80 +1475,65 @@ impl Global { profiling::scope!("Device::create_query_set"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.query_sets.prepare(id_in); + let fid = hub.query_sets.prepare::(id_in); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateQuerySet { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateQuerySet { id: fid.id(), desc: desc.clone(), }); } - let query_set = match device.create_query_set(device_id, desc) { + let query_set = match device.create_query_set(desc) { Ok(query_set) => query_set, Err(err) => break err, }; - let ref_count = query_set.life_guard.add_ref(); - let id = fid.assign(query_set, &mut token); - + let (id, resource) = fid.assign(query_set); + log::info!("Created QuerySet {:?}", id); device .trackers .lock() .query_sets - .insert_single(id, ref_count); + .insert_single(id, resource); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error("", &mut token); - - log::trace!("Device::create_query_set -> {:?}", id); - + let id = fid.assign_error(""); (id, Some(error)) } pub fn query_set_drop(&self, query_set_id: id::QuerySetId) { profiling::scope!("QuerySet::drop"); - log::trace!("QuerySet::drop {query_set_id:?}"); + + log::debug!("QuerySet {:?} is asked to be dropped", query_set_id); let hub = A::hub(self); - let mut token = Token::root(); - let device_id = { - let (mut query_set_guard, _) = hub.query_sets.write(&mut token); - let query_set = query_set_guard.get_mut(query_set_id).unwrap(); - query_set.life_guard.ref_count.take(); - query_set.device_id.value - }; + if let Some(query_set) = hub.query_sets.unregister(query_set_id) { + let device 
= &query_set.device; - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = &device_guard[device_id]; + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::DestroyQuerySet(query_set_id)); + } - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::DestroyQuerySet(query_set_id)); + device + .lock_life() + .suspected_resources + .insert(query_set_id, query_set.clone()); } - - device - .lock_life(&mut token) - .suspected_resources - .query_sets - .push(id::Valid(query_set_id)); } pub fn query_set_label(&self, id: id::QuerySetId) -> String { @@ -1849,64 +1553,52 @@ impl Global { profiling::scope!("Device::create_render_pipeline"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.render_pipelines.prepare(id_in); + let fid = hub.render_pipelines.prepare::(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let implicit_error_context = implicit_context.clone(); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } - - let adapter = &adapter_guard[device.adapter_id.value]; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateRenderPipeline { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateRenderPipeline { id: fid.id(), desc: desc.clone(), implicit_context: implicit_context.clone(), }); } - let pipeline = match device.create_render_pipeline( - device_id, - adapter, - desc, - implicit_context, - hub, - &mut token, - ) { - Ok(pair) => pair, - Err(e) => break e, - }; - let ref_count = pipeline.life_guard.add_ref(); + let pipeline = + match device.create_render_pipeline(&device.adapter, desc, implicit_context, hub) { + Ok(pair) => pair, + Err(e) => break e, + }; - let id = fid.assign(pipeline, &mut token); - log::trace!("Device::create_render_pipeline -> {:?}", id.0); + let (id, resource) = fid.assign(pipeline); + log::info!("Created RenderPipeline {:?} with {:?}", id, desc); device .trackers .lock() .render_pipelines - .insert_single(id, ref_count); + .insert_single(id, resource); - return (id.0, None); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layout. We have to remove any existing entries first. 
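// Illustrative sketch, not part of the patch: the arcanized creation flow that
// the hunks above now follow. `assign` returns the id together with an `Arc` of
// the resource, and the device's tracker holds that `Arc` instead of a manually
// managed `RefCount`. `Registry` and the `u64` ids below are simplified
// stand-ins for the wgpu-core types, not the real API.
use std::{collections::HashMap, sync::Arc};

struct Registry<R> {
    storage: HashMap<u64, Arc<R>>,
    next_id: u64,
}

impl<R> Registry<R> {
    // Mirrors `fid.assign(resource)`: store the resource and hand back a clone
    // of the `Arc` so the caller can also register it with a tracker.
    fn assign(&mut self, resource: R) -> (u64, Arc<R>) {
        let id = self.next_id;
        self.next_id += 1;
        let arc = Arc::new(resource);
        self.storage.insert(id, arc.clone());
        (id, arc)
    }
}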
- let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(&mut token); - let (mut bgl_guard, _token) = hub.bind_group_layouts.write(&mut token); + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); if let Some(ref ids) = implicit_error_context { if pipeline_layout_guard.contains(ids.root_id) { pipeline_layout_guard.remove(ids.root_id); @@ -1935,59 +1627,26 @@ impl Global { Option, ) { let hub = A::hub(self); - let mut token = Token::root(); let error = loop { - let device_id; - let id; - - { - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - - let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (_, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, _) = hub.render_pipelines.read(&mut token); - - let pipeline = match pipeline_guard.get(pipeline_id) { - Ok(pipeline) => pipeline, - Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, - }; - id = match pipeline_layout_guard[pipeline.layout_id.value] - .bind_group_layout_ids - .get(index as usize) - { - Some(id) => *id, - None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), - }; - - let layout = &bgl_guard[id]; - layout.multi_ref_count.inc(); - - if G::ids_are_generated_in_wgpu() { - return (id.0, None); - } - - device_id = layout.device_id.clone(); - } - - // The ID is provided externally, so we must create a new bind group layout - // with the given ID as a duplicate of the existing one. - let new_layout = BindGroupLayout { - device_id, - inner: crate::binding_model::BglOrDuplicate::::Duplicate(id), - multi_ref_count: crate::MultiRefCount::new(), + let pipeline = match hub.render_pipelines.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; - - let fid = hub.bind_group_layouts.prepare(id_in); - let id = fid.assign(new_layout, &mut token); - - return (id.0, None); + let id = match pipeline.layout.bind_group_layouts.get(index as usize) { + Some(bg) => hub + .bind_group_layouts + .prepare::(id_in) + .assign_existing(bg), + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; + return (id, None); }; let id = hub .bind_group_layouts - .prepare(id_in) - .assign_error("", &mut token); + .prepare::(id_in) + .assign_error(""); (id, Some(error)) } @@ -1997,35 +1656,26 @@ impl Global { pub fn render_pipeline_drop(&self, render_pipeline_id: id::RenderPipelineId) { profiling::scope!("RenderPipeline::drop"); - log::trace!("RenderPipeline::drop {:?}", render_pipeline_id); + + log::debug!( + "RenderPipeline {:?} is asked to be dropped", + render_pipeline_id + ); + let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (device_id, layout_id) = { - let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token); - match pipeline_guard.get_mut(render_pipeline_id) { - Ok(pipeline) => { - pipeline.life_guard.ref_count.take(); - (pipeline.device_id.value, pipeline.layout_id.clone()) - } - Err(InvalidId) => { - hub.render_pipelines - .unregister_locked(render_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - let mut life_lock = device_guard[device_id].lock_life(&mut token); - life_lock - .suspected_resources - .render_pipelines - .push(id::Valid(render_pipeline_id)); - life_lock - .suspected_resources - .pipeline_layouts - .push(layout_id); + if let Some(pipeline) = 
hub.render_pipelines.unregister(render_pipeline_id) { + let layout_id = pipeline.layout.as_info().id(); + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .insert(render_pipeline_id, pipeline.clone()); + + life_lock + .suspected_resources + .insert(layout_id, pipeline.layout.clone()); + } } pub fn device_create_compute_pipeline( @@ -2041,59 +1691,50 @@ impl Global { profiling::scope!("Device::create_compute_pipeline"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.compute_pipelines.prepare(id_in); + let fid = hub.compute_pipelines.prepare::(id_in); let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub)); let implicit_error_context = implicit_context.clone(); - let (device_guard, mut token) = hub.devices.read(&mut token); let error = loop { - let device = match device_guard.get(device_id) { + let device = match hub.devices.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(trace::Action::CreateComputePipeline { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::CreateComputePipeline { id: fid.id(), desc: desc.clone(), implicit_context: implicit_context.clone(), }); } - let pipeline = match device.create_compute_pipeline( - device_id, - desc, - implicit_context, - hub, - &mut token, - ) { + let pipeline = match device.create_compute_pipeline(desc, implicit_context, hub) { Ok(pair) => pair, Err(e) => break e, }; - let ref_count = pipeline.life_guard.add_ref(); - let id = fid.assign(pipeline, &mut token); - log::trace!("Device::create_compute_pipeline -> {:?}", id.0); + let (id, resource) = fid.assign(pipeline); + log::info!("Created ComputePipeline {:?} with {:?}", id, desc); device .trackers .lock() .compute_pipelines - .insert_single(id, ref_count); - return (id.0, None); + .insert_single(id, resource); + return (id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); + let id = fid.assign_error(desc.label.borrow_or_default()); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layout. We have to remove any existing entries first. 
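// Illustrative sketch, not part of the patch: the `let error = loop { ... }`
// idiom used by every creation function in this file. The loop body runs at
// most once; `break err` funnels the first failure into `error`, while the
// success path returns before the loop can end. Types here are hypothetical.
fn create_example(device_is_valid: bool) -> (u64, Option<String>) {
    let error = loop {
        if !device_is_valid {
            break String::from("device is lost");
        }
        // ...further fallible steps, each breaking with its own error...
        return (1, None); // success: leave the function directly
    };
    // failure: hand out an "error id" so later lookups fail gracefully
    (0, Some(error))
}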
- let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(&mut token); - let (mut bgl_guard, _token) = hub.bind_group_layouts.write(&mut token); + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); if let Some(ref ids) = implicit_error_context { if pipeline_layout_guard.contains(ids.root_id) { pipeline_layout_guard.remove(ids.root_id); @@ -2106,7 +1747,6 @@ impl Global { bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); } } - (id, Some(error)) } @@ -2122,59 +1762,30 @@ impl Global { Option, ) { let hub = A::hub(self); - let mut token = Token::root(); let error = loop { - let device_id; - let id; - - { - let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); - - let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token); - let (_, mut token) = hub.bind_groups.read(&mut token); - let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token); - - let pipeline = match pipeline_guard.get(pipeline_id) { - Ok(pipeline) => pipeline, - Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, - }; - id = match pipeline_layout_guard[pipeline.layout_id.value] - .bind_group_layout_ids - .get(index as usize) - { - Some(id) => *id, - None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), - }; - - let layout = &bgl_guard[id]; - layout.multi_ref_count.inc(); - - if G::ids_are_generated_in_wgpu() { - return (id.0, None); - } - - device_id = layout.device_id.clone(); - } + let pipeline_guard = hub.compute_pipelines.read(); - // The ID is provided externally, so we must create a new bind group layout - // with the given ID as a duplicate of the existing one. - let new_layout = BindGroupLayout { - device_id, - inner: crate::binding_model::BglOrDuplicate::::Duplicate(id), - multi_ref_count: crate::MultiRefCount::new(), + let pipeline = match pipeline_guard.get(pipeline_id) { + Ok(pipeline) => pipeline, + Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline, }; - let fid = hub.bind_group_layouts.prepare(id_in); - let id = fid.assign(new_layout, &mut token); + let id = match pipeline.layout.bind_group_layouts.get(index as usize) { + Some(bg) => hub + .bind_group_layouts + .prepare::(id_in) + .assign_existing(bg), + None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index), + }; - return (id.0, None); + return (id, None); }; let id = hub .bind_group_layouts - .prepare(id_in) - .assign_error("", &mut token); + .prepare::(id_in) + .assign_error(""); (id, Some(error)) } @@ -2184,35 +1795,25 @@ impl Global { pub fn compute_pipeline_drop(&self, compute_pipeline_id: id::ComputePipelineId) { profiling::scope!("ComputePipeline::drop"); - log::trace!("ComputePipeline::drop {:?}", compute_pipeline_id); + + log::debug!( + "ComputePipeline {:?} is asked to be dropped", + compute_pipeline_id + ); + let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - - let (device_id, layout_id) = { - let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token); - match pipeline_guard.get_mut(compute_pipeline_id) { - Ok(pipeline) => { - pipeline.life_guard.ref_count.take(); - (pipeline.device_id.value, pipeline.layout_id.clone()) - } - Err(InvalidId) => { - hub.compute_pipelines - .unregister_locked(compute_pipeline_id, &mut *pipeline_guard); - return; - } - } - }; - let mut life_lock = device_guard[device_id].lock_life(&mut token); - life_lock - .suspected_resources - 
.compute_pipelines - .push(id::Valid(compute_pipeline_id)); - life_lock - .suspected_resources - .pipeline_layouts - .push(layout_id); + if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { + let layout_id = pipeline.layout.as_info().id(); + let device = &pipeline.device; + let mut life_lock = device.lock_life(); + life_lock + .suspected_resources + .insert(compute_pipeline_id, pipeline.clone()); + life_lock + .suspected_resources + .insert(layout_id, pipeline.layout.clone()); + } } pub fn surface_configure( @@ -2334,43 +1935,42 @@ impl Global { Ok(()) } - log::info!("configuring surface with {:?}", config); + log::debug!("configuring surface with {:?}", config); let error = 'outer: loop { // User callbacks must not be called while we are holding locks. let user_callbacks; { let hub = A::hub(self); - let mut token = Token::root(); - - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let (device_guard, mut token) = hub.devices.read(&mut token); + let surface_guard = self.surfaces.read(); + let device_guard = hub.devices.read(); let device = match device_guard.get(device_id) { Ok(device) => device, Err(_) => break DeviceError::Invalid.into(), }; - if !device.valid { + if !device.is_valid() { break DeviceError::Lost.into(); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace - .lock() - .add(trace::Action::ConfigureSurface(surface_id, config.clone())); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::ConfigureSurface(surface_id, config.clone())); } - let surface = match surface_guard.get_mut(surface_id) { + let surface = match surface_guard.get(surface_id) { Ok(surface) => surface, Err(_) => break E::InvalidSurface, }; let caps = unsafe { let suf = A::get_surface(surface); - let adapter = &adapter_guard[device.adapter_id.value]; - match adapter.raw.adapter.surface_capabilities(&suf.unwrap().raw) { + let adapter = &device.adapter; + match adapter + .raw + .adapter + .surface_capabilities(suf.unwrap().raw.as_ref()) + { Some(caps) => caps, None => break E::UnsupportedQueueFamily, } @@ -2422,7 +2022,9 @@ impl Global { } // Wait for all work to finish before configuring the surface. - match device.maintain(hub, wgt::Maintain::Wait, &mut token) { + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + match device.maintain(hub, fence, wgt::Maintain::Wait) { Ok((closures, _)) => { user_callbacks = closures; } @@ -2432,7 +2034,7 @@ impl Global { } // All textures must be destroyed before the surface can be re-configured. 
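// Illustrative sketch, not part of the patch: the new drop path used for both
// pipeline kinds above. `unregister` yields the `Arc` if the id was still
// live, and the pipeline and the layout it keeps alive are both recorded as
// suspected so the lifetime tracker can free them once no submission still
// references them. All types below are simplified stand-ins.
use std::{collections::HashMap, sync::Arc};

struct Layout;
struct Pipeline {
    layout: Arc<Layout>,
}

#[derive(Default)]
struct Suspected {
    pipelines: HashMap<u64, Arc<Pipeline>>,
    layouts: HashMap<u64, Arc<Layout>>,
}

fn pipeline_drop(
    registry: &mut HashMap<u64, Arc<Pipeline>>,
    suspected: &mut Suspected,
    id: u64,
    layout_id: u64,
) {
    if let Some(pipeline) = registry.remove(&id) {
        // Suspect the layout first, while the pipeline still owns its Arc.
        suspected.layouts.insert(layout_id, pipeline.layout.clone());
        suspected.pipelines.insert(id, pipeline);
    }
}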
- if let Some(present) = surface.presentation.take() { + if let Some(present) = surface.presentation.lock().take() { if present.acquired_texture.is_some() { break E::PreviousOutputExists; } @@ -2445,10 +2047,10 @@ impl Global { // https://github.com/gfx-rs/wgpu/issues/4105 match unsafe { - A::get_surface_mut(surface) + A::get_surface(surface) .unwrap() .raw - .configure(&device.raw, &hal_config) + .configure(device.raw(), &hal_config) } { Ok(()) => (), Err(error) => { @@ -2465,11 +2067,9 @@ impl Global { } } - surface.presentation = Some(present::Presentation { - device_id: Stored { - value: id::Valid(device_id), - ref_count: device.life_guard.add_ref(), - }, + let mut presentation = surface.presentation.lock(); + *presentation = Some(present::Presentation { + device: super::any_device::AnyDevice::new(device.clone()), config: config.clone(), num_frames, acquired_texture: None, @@ -2477,7 +2077,6 @@ impl Global { } user_callbacks.fire(); - return None; }; @@ -2489,18 +2088,16 @@ impl Global { /// upon creating new resources when re-playing a trace. pub fn device_maintain_ids(&self, device_id: DeviceId) -> Result<(), InvalidDevice> { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?; - if !device.valid { + + let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?; + if !device.is_valid() { return Err(InvalidDevice); } - device.lock_life(&mut token).triage_suspected( + device.lock_life().triage_suspected( hub, &device.trackers, #[cfg(feature = "trace")] None, - &mut token, ); Ok(()) } @@ -2526,12 +2123,13 @@ impl Global { } let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - device_guard + let device = hub + .devices .get(device_id) - .map_err(|_| DeviceError::Invalid)? - .maintain(hub, maintain, &mut token)? + .map_err(|_| DeviceError::Invalid)?; + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + device.maintain(hub, fence, maintain)? }; closures.fire(); @@ -2545,42 +2143,33 @@ impl Global { /// /// Return `all_queue_empty` indicating whether there are more queue /// submissions still in flight. - fn poll_devices( + fn poll_device( &self, force_wait: bool, closures: &mut UserClosures, ) -> Result { - profiling::scope!("poll_devices"); + profiling::scope!("poll_device"); let hub = A::hub(self); - let mut devices_to_drop = vec![]; let mut all_queue_empty = true; { - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); + let device_guard = hub.devices.read(); - for (id, device) in device_guard.iter(A::VARIANT) { + for (_id, device) in device_guard.iter(A::VARIANT) { let maintain = if force_wait { wgt::Maintain::Wait } else { wgt::Maintain::Poll }; - let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?; + let fence = device.fence.read(); + let fence = fence.as_ref().unwrap(); + let (cbs, queue_empty) = device.maintain(hub, fence, maintain)?; all_queue_empty = all_queue_empty && queue_empty; - // If the device's own `RefCount` clone is the only one left, and - // its submission queue is empty, then it can be freed. 
- if queue_empty && device.ref_count.load() == 1 { - devices_to_drop.push(id); - } closures.extend(cbs); } } - for device_id in devices_to_drop { - self.exit_device::(device_id); - } - Ok(all_queue_empty) } @@ -2596,28 +2185,28 @@ impl Global { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] { - all_queue_empty = self.poll_devices::(force_wait, &mut closures)? - && all_queue_empty; + all_queue_empty = + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "dx12", windows))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(all(feature = "dx11", windows))] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } #[cfg(feature = "gles")] { all_queue_empty = - self.poll_devices::(force_wait, &mut closures)? && all_queue_empty; + self.poll_device::(force_wait, &mut closures)? && all_queue_empty; } closures.fire(); @@ -2633,13 +2222,12 @@ impl Global { log::trace!("Device::start_capture"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - if let Ok(device) = device_guard.get(id) { - if !device.valid { + + if let Ok(device) = hub.devices.get(id) { + if !device.is_valid() { return; } - unsafe { device.raw.start_capture() }; + unsafe { device.raw().start_capture() }; } } @@ -2647,30 +2235,33 @@ impl Global { log::trace!("Device::stop_capture"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - if let Ok(device) = device_guard.get(id) { - if !device.valid { + + if let Ok(device) = hub.devices.get(id) { + if !device.is_valid() { return; } - unsafe { device.raw.stop_capture() }; + unsafe { device.raw().stop_capture() }; } } pub fn device_drop(&self, device_id: DeviceId) { profiling::scope!("Device::drop"); - log::trace!("Device::drop {device_id:?}"); + log::debug!("Device {:?} is asked to be dropped", device_id); let hub = A::hub(self); - let mut token = Token::root(); - - // For now, just drop the `RefCount` in `device.life_guard`, which - // stands for the user's reference to the device. We'll take care of - // cleaning up the device when we're polled, once its queue submissions - // have completed and it is no longer needed by other resources. - let (mut device_guard, _) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { - device.life_guard.ref_count.take().unwrap(); + if let Some(device) = hub.devices.unregister(device_id) { + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. We know we were + // just polled, so `life_tracker.free_resources` is empty. 
+ debug_assert!(device.lock_life().queue_empty()); + { + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + pending_writes.deactivate(); + } + + drop(device); } } @@ -2680,11 +2271,9 @@ impl Global { device_lost_closure: DeviceLostClosure, ) { let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { - let mut life_tracker = device.lock_life(&mut token); + if let Ok(device) = hub.devices.get(device_id) { + let mut life_tracker = device.lock_life(); life_tracker.device_lost_closure = Some(device_lost_closure); } } @@ -2693,12 +2282,16 @@ impl Global { log::trace!("Device::destroy {device_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, _) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { + if let Ok(device) = hub.devices.get(device_id) { // Follow the steps at // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. + // It's legal to call destroy multiple times, but if the device + // is already invalid, there's nothing more to do. There's also + // no need to return an error. + if !device.is_valid() { + return; + } // The last part of destroy is to lose the device. The spec says // delay that until all "currently-enqueued operations on any @@ -2707,7 +2300,7 @@ impl Global { // check for empty queues and a DeviceLostClosure. At that time, // the DeviceLostClosure will be called with "destroyed" as the // reason. - device.valid = false; + device.valid.store(false, Ordering::Relaxed); } } @@ -2715,42 +2308,19 @@ impl Global { log::trace!("Device::mark_lost {device_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - if let Ok(device) = device_guard.get_mut(device_id) { - device.lose(&mut token, message); + if let Ok(device) = hub.devices.get(device_id) { + device.lose(message); } } - /// Exit the unreferenced, inactive device `device_id`. - fn exit_device(&self, device_id: DeviceId) { - let hub = A::hub(self); - let mut token = Token::root(); - let mut free_adapter_id = None; - { - let (device, mut _token) = hub.devices.unregister(device_id, &mut token); - if let Some(mut device) = device { - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. - debug_assert!(device.lock_life(&mut _token).queue_empty()); - device.pending_writes.deactivate(); - - // Adapter is only referenced by the device and itself. - // This isn't a robust way to destroy them, we should find a better one. - if device.adapter_id.ref_count.load() == 1 { - free_adapter_id = Some(device.adapter_id.value.0); - } + pub fn queue_drop(&self, queue_id: QueueId) { + profiling::scope!("Queue::drop"); + log::debug!("Queue {:?} is asked to be dropped", queue_id); - device.dispose(); - } - } - - // Free the adapter now that we've dropped the `Device` token. 
- if let Some(free_adapter_id) = free_adapter_id { - let _ = hub.adapters.unregister(free_adapter_id, &mut token); + let hub = A::hub(self); + if let Some(queue) = hub.queues.unregister(queue_id) { + drop(queue); } } @@ -2765,9 +2335,10 @@ impl Global { // User callbacks must not be called while holding buffer_map_async_inner's locks, so we // defer the error callback if it needs to be called immediately (typically when running // into errors). - if let Err((op, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { - op.callback.call(Err(err.clone())); - + if let Err((mut operation, err)) = self.buffer_map_async_inner::(buffer_id, range, op) { + if let Some(callback) = operation.callback.take() { + callback.call(Err(err.clone())); + } return Err(err); } @@ -2785,8 +2356,7 @@ impl Global { profiling::scope!("Buffer::map_async"); let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); + let (pub_usage, internal_use) = match op.host { HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ), HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE), @@ -2796,10 +2366,10 @@ impl Global { return Err((op, BufferAccessError::UnalignedRange)); } - let (device_id, ref_count) = { - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) + let buffer = { + let buffer = hub + .buffers + .get(buffer_id) .map_err(|_| BufferAccessError::Invalid); let buffer = match buffer { @@ -2809,8 +2379,8 @@ impl Global { } }; - let device = &device_guard[buffer.device_id.value]; - if !device.valid { + let device = &buffer.device; + if !device.is_valid() { return Err((op, DeviceError::Lost.into())); } @@ -2836,40 +2406,38 @@ impl Global { }, )); } - - buffer.map_state = match buffer.map_state { - resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => { - return Err((op, BufferAccessError::AlreadyMapped)); - } - resource::BufferMapState::Waiting(_) => { - return Err((op, BufferAccessError::MapAlreadyPending)); - } - resource::BufferMapState::Idle => { - resource::BufferMapState::Waiting(resource::BufferPendingMapping { - range, - op, - _parent_ref_count: buffer.life_guard.add_ref(), - }) - } - }; + { + let map_state = &mut *buffer.map_state.lock(); + *map_state = match *map_state { + resource::BufferMapState::Init { .. } + | resource::BufferMapState::Active { .. } => { + return Err((op, BufferAccessError::AlreadyMapped)); + } + resource::BufferMapState::Waiting(_) => { + return Err((op, BufferAccessError::MapAlreadyPending)); + } + resource::BufferMapState::Idle => { + resource::BufferMapState::Waiting(resource::BufferPendingMapping { + range, + op, + _parent_buffer: buffer.clone(), + }) + } + }; + } log::debug!("Buffer {:?} map state -> Waiting", buffer_id); - let ret = (buffer.device_id.value, buffer.life_guard.add_ref()); - - let mut trackers = device.trackers.lock(); - trackers - .buffers - .set_single(&*buffer_guard, buffer_id, internal_use); - trackers.buffers.drain(); + { + let mut trackers = buffer.device.as_ref().trackers.lock(); + trackers.buffers.set_single(&buffer, internal_use); + //TODO: Check if draining ALL buffers is correct! + let _ = trackers.buffers.drain_transitions(); + } - ret + buffer }; - let device = &device_guard[device_id]; - // Validity of device was confirmed in the code block that set device_id. 
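// Illustrative sketch, not part of the patch: the map-state transition that
// `buffer_map_async` performs above. Only `Idle` may move to `Waiting`; the
// other states reject the call. The real `BufferMapState` also has an `Init`
// variant, folded into `Active` here for brevity.
enum MapState {
    Idle,
    Waiting,
    Active,
}

fn try_start_map(state: &mut MapState) -> Result<(), &'static str> {
    match state {
        MapState::Active => Err("AlreadyMapped"),
        MapState::Waiting => Err("MapAlreadyPending"),
        MapState::Idle => {
            // The lifetime tracker resolves Waiting to Active once the last
            // submission using the buffer has completed.
            *state = MapState::Waiting;
            Ok(())
        }
    }
}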
- device - .lock_life(&mut token) - .map(id::Valid(buffer_id), ref_count); + buffer.device.lock_life().map(&buffer); Ok(()) } @@ -2884,9 +2452,9 @@ impl Global { log::trace!("Buffer::get_mapped_range {buffer_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (buffer_guard, _) = hub.buffers.read(&mut token); - let buffer = buffer_guard + + let buffer = hub + .buffers .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; @@ -2904,9 +2472,9 @@ impl Global { if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 { return Err(BufferAccessError::UnalignedRangeSize { range_size }); } - - match buffer.map_state { - resource::BufferMapState::Init { ptr, .. } => { + let map_state = &*buffer.map_state.lock(); + match *map_state { + resource::BufferMapState::Init { ref ptr, .. } => { // offset (u64) can not be < 0, so no need to validate the lower bound if offset + range_size > buffer.size { return Err(BufferAccessError::OutOfBoundsOverrun { @@ -2916,7 +2484,9 @@ impl Global { } unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) } } - resource::BufferMapState::Active { ptr, ref range, .. } => { + resource::BufferMapState::Active { + ref ptr, ref range, .. + } => { if offset < range.start { return Err(BufferAccessError::OutOfBoundsUnderrun { index: offset, @@ -2939,107 +2509,6 @@ impl Global { } } } - - fn buffer_unmap_inner( - &self, - buffer_id: id::BufferId, - buffer: &mut Buffer, - device: &mut Device, - ) -> Result, BufferAccessError> { - log::debug!("Buffer {:?} map state -> Idle", buffer_id); - match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) { - resource::BufferMapState::Init { - ptr, - stage_buffer, - needs_flush, - } => { - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: 0..buffer.size, - queued: true, - }); - } - let _ = ptr; - if needs_flush { - unsafe { - device - .raw - .flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size)); - } - } - - let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; - - buffer.life_guard.use_at(device.active_submission_index + 1); - let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy { - src_offset: 0, - dst_offset: 0, - size, - }); - let transition_src = hal::BufferBarrier { - buffer: &stage_buffer, - usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, - }; - let transition_dst = hal::BufferBarrier { - buffer: raw_buf, - usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, - }; - let encoder = device.pending_writes.activate(); - unsafe { - encoder.transition_buffers( - iter::once(transition_src).chain(iter::once(transition_dst)), - ); - if buffer.size > 0 { - encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter()); - } - } - device - .pending_writes - .consume_temp(queue::TempResource::Buffer(stage_buffer)); - device.pending_writes.dst_buffers.insert(buffer_id); - } - resource::BufferMapState::Idle => { - return Err(BufferAccessError::NotMapped); - } - resource::BufferMapState::Waiting(pending) => { - return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); - } - resource::BufferMapState::Active { ptr, range, host } => { - if host == HostMap::Write { - #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); - let size = range.end - 
range.start; - let data = trace.make_binary("bin", unsafe { - std::slice::from_raw_parts(ptr.as_ptr(), size as usize) - }); - trace.add(trace::Action::WriteBuffer { - id: buffer_id, - data, - range: range.clone(), - queued: false, - }); - } - let _ = (ptr, range); - } - unsafe { - device - .raw - .unmap_buffer(buffer.raw.as_ref().unwrap()) - .map_err(DeviceError::from)? - }; - } - } - Ok(None) - } - pub fn buffer_unmap(&self, buffer_id: id::BufferId) -> BufferAccessResult { profiling::scope!("unmap", "Buffer"); log::trace!("Buffer::unmap {buffer_id:?}"); @@ -3048,24 +2517,22 @@ impl Global { { // Restrict the locks to this scope. let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let (mut buffer_guard, _) = hub.buffers.write(&mut token); - let buffer = buffer_guard - .get_mut(buffer_id) + let buffer = hub + .buffers + .get(buffer_id) .map_err(|_| BufferAccessError::Invalid)?; - let device = &mut device_guard[buffer.device_id.value]; - if !device.valid { + if !buffer.device.is_valid() { return Err(DeviceError::Lost.into()); } - - closure = self.buffer_unmap_inner(buffer_id, buffer, device) + closure = buffer.buffer_unmap_inner() } // Note: outside the scope where locks are held when calling the callback - if let Some((operation, status)) = closure? { - operation.callback.call(status); + if let Some((mut operation, status)) = closure? { + if let Some(callback) = operation.callback.take() { + callback.call(status); + } } Ok(()) } diff --git a/third_party/rust/wgpu-core/src/device/life.rs b/third_party/rust/wgpu-core/src/device/life.rs index dc27bc43e0304..5a2eff0cfc6a5 100644 --- a/third_party/rust/wgpu-core/src/device/life.rs +++ b/third_party/rust/wgpu-core/src/device/life.rs @@ -1,202 +1,148 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ + binding_model::{BindGroup, BindGroupLayout, PipelineLayout}, + command::RenderBundle, device::{ queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource}, DeviceError, DeviceLostClosure, }, hal_api::HalApi, - hub::{Hub, Token}, - id, - identity::GlobalIdentityHandlerFactory, - resource, - track::{BindGroupStates, RenderBundleScope, Tracker}, - RefCount, Stored, SubmissionIndex, + hub::Hub, + id::{ + self, BindGroupId, BindGroupLayoutId, BufferId, ComputePipelineId, PipelineLayoutId, + QuerySetId, RenderBundleId, RenderPipelineId, SamplerId, StagingBufferId, TextureId, + TextureViewId, + }, + pipeline::{ComputePipeline, RenderPipeline}, + registry::Registry, + resource::{ + self, Buffer, QuerySet, Resource, ResourceType, Sampler, StagingBuffer, Texture, + TextureView, + }, + track::{ResourceTracker, Tracker}, + FastHashMap, SubmissionIndex, }; use smallvec::SmallVec; -use hal::Device as _; use parking_lot::Mutex; use thiserror::Error; +use wgt::WasmNotSendSync; -use std::mem; +use std::{any::Any, sync::Arc}; -/// A struct that keeps lists of resources that are no longer needed by the user. 
-#[derive(Debug, Default)] -pub(super) struct SuspectedResources { - pub(super) buffers: Vec>, - pub(super) textures: Vec>, - pub(super) texture_views: Vec>, - pub(super) samplers: Vec>, - pub(super) bind_groups: Vec>, - pub(super) compute_pipelines: Vec>, - pub(super) render_pipelines: Vec>, - pub(super) bind_group_layouts: Vec>, - pub(super) pipeline_layouts: Vec>, - pub(super) render_bundles: Vec>, - pub(super) query_sets: Vec>, +pub(crate) trait ResourceMap: Any + WasmNotSendSync { + fn as_any(&self) -> &dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; + fn clear_map(&mut self); + fn extend_map(&mut self, maps: &mut ResourceMaps); } -impl SuspectedResources { - pub(super) fn clear(&mut self) { - self.buffers.clear(); - self.textures.clear(); - self.texture_views.clear(); - self.samplers.clear(); - self.bind_groups.clear(); - self.compute_pipelines.clear(); - self.render_pipelines.clear(); - self.bind_group_layouts.clear(); - self.pipeline_layouts.clear(); - self.render_bundles.clear(); - self.query_sets.clear(); - } - - pub(super) fn extend(&mut self, other: &Self) { - self.buffers.extend_from_slice(&other.buffers); - self.textures.extend_from_slice(&other.textures); - self.texture_views.extend_from_slice(&other.texture_views); - self.samplers.extend_from_slice(&other.samplers); - self.bind_groups.extend_from_slice(&other.bind_groups); - self.compute_pipelines - .extend_from_slice(&other.compute_pipelines); - self.render_pipelines - .extend_from_slice(&other.render_pipelines); - self.bind_group_layouts - .extend_from_slice(&other.bind_group_layouts); - self.pipeline_layouts - .extend_from_slice(&other.pipeline_layouts); - self.render_bundles.extend_from_slice(&other.render_bundles); - self.query_sets.extend_from_slice(&other.query_sets); - } - - pub(super) fn add_render_bundle_scope(&mut self, trackers: &RenderBundleScope) { - self.buffers.extend(trackers.buffers.used()); - self.textures.extend(trackers.textures.used()); - self.bind_groups.extend(trackers.bind_groups.used()); - self.render_pipelines - .extend(trackers.render_pipelines.used()); - self.query_sets.extend(trackers.query_sets.used()); - } - - pub(super) fn add_bind_group_states(&mut self, trackers: &BindGroupStates) { - self.buffers.extend(trackers.buffers.used()); - self.textures.extend(trackers.textures.used()); - self.texture_views.extend(trackers.views.used()); - self.samplers.extend(trackers.samplers.used()); +impl ResourceMap for FastHashMap> +where + Id: id::TypedId, + R: Resource, +{ + fn as_any(&self) -> &dyn Any { + self + } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } + fn clear_map(&mut self) { + self.clear() + } + fn extend_map(&mut self, r: &mut ResourceMaps) { + if let Some(other) = r.maps.get_mut(R::TYPE) { + if let Some(other) = other.as_any_mut().downcast_mut::() { + self.extend(other.drain()); + } + } } } -/// Raw backend resources that should be freed shortly. -#[derive(Debug)] -struct NonReferencedResources { - buffers: Vec, - textures: Vec, - texture_views: Vec, - samplers: Vec, - bind_groups: Vec, - compute_pipes: Vec, - render_pipes: Vec, - bind_group_layouts: Vec, - pipeline_layouts: Vec, - query_sets: Vec, +/// A struct that keeps lists of resources that are no longer needed by the user. 
+#[derive(Default)] +pub(crate) struct ResourceMaps { + pub(crate) maps: FastHashMap>, } -impl NonReferencedResources { - fn new() -> Self { - Self { - buffers: Vec::new(), - textures: Vec::new(), - texture_views: Vec::new(), - samplers: Vec::new(), - bind_groups: Vec::new(), - compute_pipes: Vec::new(), - render_pipes: Vec::new(), - bind_group_layouts: Vec::new(), - pipeline_layouts: Vec::new(), - query_sets: Vec::new(), - } +impl ResourceMaps { + fn add_type(&mut self) -> &mut Self + where + Id: id::TypedId, + R: Resource, + { + let map = FastHashMap::>::default(); + self.maps.insert(R::TYPE, Box::new(map)); + self } - - fn extend(&mut self, other: Self) { - self.buffers.extend(other.buffers); - self.textures.extend(other.textures); - self.texture_views.extend(other.texture_views); - self.samplers.extend(other.samplers); - self.bind_groups.extend(other.bind_groups); - self.compute_pipes.extend(other.compute_pipes); - self.render_pipes.extend(other.render_pipes); - self.query_sets.extend(other.query_sets); - assert!(other.bind_group_layouts.is_empty()); - assert!(other.pipeline_layouts.is_empty()); - } - - unsafe fn clean(&mut self, device: &A::Device) { - if !self.buffers.is_empty() { - profiling::scope!("destroy_buffers"); - for raw in self.buffers.drain(..) { - unsafe { device.destroy_buffer(raw) }; - } - } - if !self.textures.is_empty() { - profiling::scope!("destroy_textures"); - for raw in self.textures.drain(..) { - unsafe { device.destroy_texture(raw) }; - } - } - if !self.texture_views.is_empty() { - profiling::scope!("destroy_texture_views"); - for raw in self.texture_views.drain(..) { - unsafe { device.destroy_texture_view(raw) }; - } - } - if !self.samplers.is_empty() { - profiling::scope!("destroy_samplers"); - for raw in self.samplers.drain(..) { - unsafe { device.destroy_sampler(raw) }; - } - } - if !self.bind_groups.is_empty() { - profiling::scope!("destroy_bind_groups"); - for raw in self.bind_groups.drain(..) { - unsafe { device.destroy_bind_group(raw) }; - } - } - if !self.compute_pipes.is_empty() { - profiling::scope!("destroy_compute_pipelines"); - for raw in self.compute_pipes.drain(..) { - unsafe { device.destroy_compute_pipeline(raw) }; - } - } - if !self.render_pipes.is_empty() { - profiling::scope!("destroy_render_pipelines"); - for raw in self.render_pipes.drain(..) { - unsafe { device.destroy_render_pipeline(raw) }; - } - } - if !self.bind_group_layouts.is_empty() { - profiling::scope!("destroy_bind_group_layouts"); - for raw in self.bind_group_layouts.drain(..) { - unsafe { device.destroy_bind_group_layout(raw) }; - } - } - if !self.pipeline_layouts.is_empty() { - profiling::scope!("destroy_pipeline_layouts"); - for raw in self.pipeline_layouts.drain(..) { - unsafe { device.destroy_pipeline_layout(raw) }; - } - } - if !self.query_sets.is_empty() { - profiling::scope!("destroy_query_sets"); - for raw in self.query_sets.drain(..) 
{ - unsafe { device.destroy_query_set(raw) }; - } - } + fn map(&self) -> &FastHashMap> + where + Id: id::TypedId, + R: Resource, + { + let map = self.maps.get(R::TYPE).unwrap(); + let any_map = map.as_ref().as_any(); + let map = any_map.downcast_ref::>>().unwrap(); + map + } + fn map_mut(&mut self) -> &mut FastHashMap> + where + Id: id::TypedId, + R: Resource, + { + let map = self + .maps + .entry(R::TYPE) + .or_insert_with(|| Box::>>::default()); + let any_map = map.as_mut().as_any_mut(); + let map = any_map.downcast_mut::>>().unwrap(); + map + } + pub(crate) fn new() -> Self { + let mut maps = Self::default(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps.add_type::>(); + maps + } + pub(crate) fn clear(&mut self) { + self.maps.iter_mut().for_each(|(_t, map)| map.clear_map()); + } + pub(crate) fn extend(&mut self, mut other: Self) { + self.maps.iter_mut().for_each(|(_t, map)| { + map.extend_map(&mut other); + }); + } + pub(crate) fn insert(&mut self, id: Id, r: Arc) -> &mut Self + where + Id: id::TypedId, + R: Resource, + { + self.map_mut().insert(id, r); + self + } + pub(crate) fn contains(&mut self, id: &Id) -> bool + where + Id: id::TypedId, + R: Resource, + { + self.map::().contains_key(id) } } /// Resources used by a queue submission, and work to be done once it completes. -struct ActiveSubmission { +struct ActiveSubmission { /// The index of the submission we track. /// /// When `Device::fence`'s value is greater than or equal to this, our queue @@ -213,10 +159,10 @@ struct ActiveSubmission { /// This includes things like temporary resources and resources that are /// used by submitted commands but have been dropped by the user (meaning that /// this submission is their last reference.) - last_resources: NonReferencedResources, + last_resources: ResourceMaps, /// Buffers to be mapped once this submission has completed. - mapped: Vec>, + mapped: Vec>>, encoders: Vec>, @@ -243,12 +189,12 @@ pub enum WaitIdleError { /// A buffer cannot be mapped until all active queue submissions that use it /// have completed. To that end: /// -/// - Each buffer's `LifeGuard::submission_index` records the index of the +/// - Each buffer's `ResourceInfo::submission_index` records the index of the /// most recent queue submission that uses that buffer. /// -/// - Calling `map_async` adds the buffer to `self.mapped`, and changes -/// `Buffer::map_state` to prevent it from being used in any new -/// submissions. +/// - Calling `Global::buffer_map_async` adds the buffer to +/// `self.mapped`, and changes `Buffer::map_state` to prevent it +/// from being used in any new submissions. /// /// - When the device is polled, the following `LifetimeTracker` methods decide /// what should happen next: @@ -271,23 +217,23 @@ pub enum WaitIdleError { /// /// 4) `cleanup` frees everything in `free_resources`. /// -/// Only `self.mapped` holds a `RefCount` for the buffer; it is dropped by -/// `triage_mapped`. -pub(super) struct LifetimeTracker { +/// Only calling `Global::buffer_map_async` clones a new `Arc` for the +/// buffer. This new `Arc` is only dropped by `handle_mapping`. +pub(crate) struct LifetimeTracker { /// Resources that the user has requested be mapped, but which are used by /// queue submissions still in flight. 
-    mapped: Vec<Stored<id::BufferId>>,
+    mapped: Vec<Arc<Buffer<A>>>,

     /// Buffers can be used in a submission that is yet to be made, by the
     /// means of `write_buffer()`, so we have a special place for them.
-    pub future_suspected_buffers: Vec<Stored<id::BufferId>>,
+    pub future_suspected_buffers: Vec<Arc<Buffer<A>>>,

     /// Textures can be used in the upcoming submission by `write_texture`.
-    pub future_suspected_textures: Vec<Stored<id::TextureId>>,
+    pub future_suspected_textures: Vec<Arc<Texture<A>>>,

     /// Resources whose user handle has died (i.e. drop/destroy has been called)
     /// and will likely be ready for destruction soon.
-    pub suspected_resources: SuspectedResources,
+    pub suspected_resources: ResourceMaps,

     /// Resources used by queue submissions still in flight. One entry per
     /// submission, with older submissions appearing before younger.
@@ -302,11 +248,11 @@ pub(super) struct LifetimeTracker<A: hal::Api> {
     /// These are freed by `LifeTracker::cleanup`, which is called from periodic
     /// maintenance functions like `Global::device_poll`, and when a device is
     /// destroyed.
-    free_resources: NonReferencedResources<A>,
+    free_resources: ResourceMaps,

     /// Buffers the user has asked us to map, and which are not used by any
     /// queue submission still in flight.
-    ready_to_map: Vec<id::Valid<id::BufferId>>,
+    ready_to_map: Vec<Arc<Buffer<A>>>,

     /// Queue "on_submitted_work_done" closures that were initiated while there
     /// were no pending submissions. These cannot be invoked immediately, as they
     /// must run after all mapped buffer callbacks have been issued, so we defer
     /// them here until the next time the device is maintained.
     work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,

     /// Closure to be called on "lose the device". This is invoked directly by
     /// device.lose or by the UserCallbacks returned from maintain when the device
     /// has been destroyed and its queues are empty.
     pub device_lost_closure: Option<DeviceLostClosure>,
 }

-impl<A: hal::Api> LifetimeTracker<A> {
+impl<A: HalApi> LifetimeTracker<A> {
     pub fn new() -> Self {
         Self {
             mapped: Vec::new(),
             future_suspected_buffers: Vec::new(),
             future_suspected_textures: Vec::new(),
-            suspected_resources: SuspectedResources::default(),
+            suspected_resources: ResourceMaps::new::<A>(),
             active: Vec::new(),
-            free_resources: NonReferencedResources::new(),
+            free_resources: ResourceMaps::new::<A>(),
             ready_to_map: Vec::new(),
             work_done_closures: SmallVec::new(),
             device_lost_closure: None,
         }
     }

@@ -347,13 +293,17 @@ impl<A: hal::Api> LifetimeTracker<A> {
         temp_resources: impl Iterator<Item = TempResource<A>>,
         encoders: Vec<EncoderInFlight<A>>,
     ) {
-        let mut last_resources = NonReferencedResources::new();
+        let mut last_resources = ResourceMaps::new::<A>();
         for res in temp_resources {
             match res {
-                TempResource::Buffer(raw) => last_resources.buffers.push(raw),
-                TempResource::Texture(raw, views) => {
-                    last_resources.textures.push(raw);
-                    last_resources.texture_views.extend(views);
+                TempResource::Buffer(raw) => {
+                    last_resources.insert(raw.as_info().id(), raw);
+                }
+                TempResource::StagingBuffer(raw) => {
+                    last_resources.insert(raw.as_info().id(), raw);
+                }
+                TempResource::Texture(raw) => {
+                    last_resources.insert(raw.as_info().id(), raw);
+                }
             }
         }

@@ -368,20 +318,16 @@ impl<A: hal::Api> LifetimeTracker<A> {
     }

     pub fn post_submit(&mut self) {
-        self.suspected_resources.buffers.extend(
-            self.future_suspected_buffers
-                .drain(..)
-                .map(|stored| stored.value),
-        );
-        self.suspected_resources.textures.extend(
-            self.future_suspected_textures
-                .drain(..)
-                .map(|stored| stored.value),
-        );
+        for v in self.future_suspected_buffers.drain(..).take(1) {
+            self.suspected_resources.insert(v.as_info().id(), v);
+        }
+        for v in self.future_suspected_textures.drain(..).take(1) {
+            self.suspected_resources.insert(v.as_info().id(), v);
+        }
     }

-    pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
-        self.mapped.push(Stored { value, ref_count });
+    pub(crate) fn map(&mut self, value: &Arc<Buffer<A>>) {
+        self.mapped.push(value.clone());
     }

     /// Sort out the consequences of completed submissions.
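// Illustrative sketch, not part of the patch: the type-erasure pattern behind
// `ResourceMaps` above. One HashMap per resource type is stored as
// `Box<dyn Any>` under a type tag, and typed accessors downcast back.
// `TypedMaps`, the `&'static str` tag (standing in for `ResourceType`), and
// the `u64` ids are all simplifications of the wgpu-core types.
use std::{any::Any, collections::HashMap, sync::Arc};

#[derive(Default)]
struct TypedMaps {
    maps: HashMap<&'static str, Box<dyn Any>>,
}

impl TypedMaps {
    // Like `ResourceMaps::map_mut`: fetch (or create) the per-type map and
    // downcast it back to its concrete HashMap type.
    fn map_mut<R: Any>(&mut self, tag: &'static str) -> &mut HashMap<u64, Arc<R>> {
        self.maps
            .entry(tag)
            .or_insert_with(|| Box::new(HashMap::<u64, Arc<R>>::new()) as Box<dyn Any>)
            .downcast_mut::<HashMap<u64, Arc<R>>>()
            .expect("tag registered with a different resource type")
    }

    fn insert<R: Any>(&mut self, tag: &'static str, id: u64, resource: Arc<R>) {
        self.map_mut::<R>(tag).insert(id, resource);
    }
}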
@@ -409,7 +355,7 @@ impl LifetimeTracker { pub fn triage_submissions( &mut self, last_done: SubmissionIndex, - command_allocator: &Mutex>, + command_allocator: &mut super::CommandAllocator, ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> { profiling::scope!("triage_submissions"); @@ -423,23 +369,21 @@ impl LifetimeTracker { let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect(); for a in self.active.drain(..done_count) { - log::trace!("Active submission {} is done", a.index); + log::info!("Active submission {} is done", a.index); self.free_resources.extend(a.last_resources); self.ready_to_map.extend(a.mapped); for encoder in a.encoders { let raw = unsafe { encoder.land() }; - command_allocator.lock().release_encoder(raw); + command_allocator.release_encoder(raw); } work_done_closures.extend(a.work_done_closures); } work_done_closures } - pub fn cleanup(&mut self, device: &A::Device) { + pub fn cleanup(&mut self) { profiling::scope!("LifetimeTracker::cleanup"); - unsafe { - self.free_resources.clean(device); - } + self.free_resources.clear(); } pub fn schedule_resource_destruction( @@ -453,10 +397,14 @@ impl LifetimeTracker { .find(|a| a.index == last_submit_index) .map_or(&mut self.free_resources, |a| &mut a.last_resources); match temp_resource { - TempResource::Buffer(raw) => resources.buffers.push(raw), - TempResource::Texture(raw, views) => { - resources.texture_views.extend(views); - resources.textures.push(raw); + TempResource::Buffer(raw) => { + resources.insert(raw.as_info().id(), raw); + } + TempResource::StagingBuffer(raw) => { + resources.insert(raw.as_info().id(), raw); + } + TempResource::Texture(raw) => { + resources.insert(raw.as_info().id(), raw); } } } @@ -476,6 +424,399 @@ impl LifetimeTracker { } impl LifetimeTracker { + fn triage_resources( + resources_map: &mut FastHashMap>, + active: &mut [ActiveSubmission], + free_resources: &mut ResourceMaps, + trackers: &mut impl ResourceTracker, + registry: &Registry, + count_fn: F, + mut on_remove: T, + ) -> Vec> + where + Id: id::TypedId, + R: Resource, + F: Fn(u64, &[ActiveSubmission], &Id) -> usize, + T: FnMut(&Id, &Arc), + { + let mut removed_resources = Vec::new(); + resources_map.retain(|&id, resource| { + let submit_index = resource.as_info().submission_index(); + let mut count = 1; + count += count_fn(submit_index, active, &id); + count += registry.contains(id) as usize; + + let non_referenced_resources = active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut *free_resources, |a| &mut a.last_resources); + count += non_referenced_resources.contains::(&id) as usize; + + let is_removed = trackers.remove_abandoned(id, count); + if is_removed { + on_remove(&id, resource); + removed_resources.push(resource.clone()); + non_referenced_resources.insert(id, resource.clone()); + } + !is_removed + }); + removed_resources + } + + fn triage_suspected_render_bundles( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.bundles, + &hub.render_bundles, + |_submit_index, _active, _id| 0, + |_bundle_id, _bundle| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyRenderBundle(*_bundle_id)); + } + }, + ); + 
removed_resources.drain(..).for_each(|bundle| { + for v in bundle.used.buffers.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bundle.used.textures.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bundle.used.bind_groups.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bundle.used.render_pipelines.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bundle.used.query_sets.write().drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + }); + self + } + + fn triage_suspected_bind_groups( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resource = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.bind_groups, + &hub.bind_groups, + |_submit_index, _active, _id| 0, + |_bind_group_id, _bind_group| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBindGroup(*_bind_group_id)); + } + }, + ); + removed_resource.drain(..).for_each(|bind_group| { + for v in bind_group.used.buffers.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.textures.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.views.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + for v in bind_group.used.samplers.drain_resources() { + self.suspected_resources.insert(v.as_info().id(), v); + } + //Releasing safely unused resources to decrement refcount + bind_group.used_buffer_ranges.write().clear(); + bind_group.used_texture_ranges.write().clear(); + bind_group.dynamic_binding_info.write().clear(); + + self.suspected_resources + .insert(bind_group.layout.as_info().id(), bind_group.layout.clone()); + }); + self + } + + fn triage_suspected_texture_views( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.views, + &hub.texture_views, + |_submit_index, _active, _id| 0, + |_texture_view_id, _texture_view| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyTextureView(*_texture_view_id)); + } + }, + ); + removed_resources.drain(..).for_each(|texture_view| { + let mut lock = texture_view.parent.write(); + if let Some(parent_texture) = lock.take() { + self.suspected_resources + .insert(parent_texture.as_info().id(), parent_texture); + } + }); + self + } + + fn triage_suspected_textures( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.textures, + &hub.textures, + |_submit_index, _active, _id| 0, + |_texture_id, _texture| { + 
#[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyTexture(*_texture_id)); + } + }, + ); + self + } + + fn triage_suspected_samplers( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.samplers, + &hub.samplers, + |_submit_index, _active, _id| 0, + |_sampler_id, _sampler| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroySampler(*_sampler_id)); + } + }, + ); + self + } + + fn triage_suspected_buffers( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.buffers, + &hub.buffers, + |submit_index, active, buffer_id| { + let mut count = 0; + let mapped = active + .iter() + .find(|a| a.index == submit_index) + .map_or(&self.mapped, |a| &a.mapped); + mapped.iter().for_each(|b| { + if b.as_info().id() == *buffer_id { + count += 1; + } + }); + count + }, + |_buffer_id, _buffer| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBuffer(*_buffer_id)); + } + }, + ); + removed_resources.drain(..).for_each(|buffer| { + if let resource::BufferMapState::Init { + ref stage_buffer, .. + } = *buffer.map_state.lock() + { + self.free_resources + .insert(stage_buffer.as_info().id(), stage_buffer.clone()); + } + }); + self + } + + fn triage_suspected_compute_pipelines( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.compute_pipelines, + &hub.compute_pipelines, + |_submit_index, _active, _id| 0, + |_compute_pipeline_id, _compute_pipeline| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyComputePipeline(*_compute_pipeline_id)); + } + }, + ); + removed_resources.drain(..).for_each(|compute_pipeline| { + self.suspected_resources.insert( + compute_pipeline.layout.as_info().id(), + compute_pipeline.layout.clone(), + ); + }); + self + } + + fn triage_suspected_render_pipelines( + &mut self, + hub: &Hub, + trackers: &Mutex>, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + let mut removed_resources = Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.render_pipelines, + &hub.render_pipelines, + |_submit_index, _active, _id| 0, + |_render_pipeline_id, _render_pipeline| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyRenderPipeline(*_render_pipeline_id)); + } + }, + ); + removed_resources.drain(..).for_each(|render_pipeline| { + self.suspected_resources.insert( + 
render_pipeline.layout.as_info().id(), + render_pipeline.layout.clone(), + ); + }); + self + } + + fn triage_suspected_pipeline_layouts( + &mut self, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + let mut removed_resources = Vec::new(); + self.suspected_resources + .map_mut::>() + .retain(|_pipeline_layout_id, pipeline_layout| { + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyPipelineLayout(*_pipeline_layout_id)); + } + removed_resources.push(pipeline_layout.clone()); + false + }); + removed_resources.drain(..).for_each(|pipeline_layout| { + for bgl in &pipeline_layout.bind_group_layouts { + self.suspected_resources + .insert(bgl.as_info().id(), bgl.clone()); + } + }); + self + } + + fn triage_suspected_bind_group_layouts( + &mut self, + #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>, + ) -> &mut Self { + self.suspected_resources + .map_mut::>() + .retain(|bind_group_layout_id, bind_group_layout| { + //Note: this has to happen after all the suspected pipelines are destroyed + //Note: nothing else can bump the refcount since the guard is locked exclusively + //Note: same BGL can appear multiple times in the list, but only the last + // encounter could drop the refcount to 0. + #[cfg(feature = "trace")] + if let Some(ref mut t) = *trace { + t.add(trace::Action::DestroyBindGroupLayout(*bind_group_layout_id)); + } + self.free_resources + .insert(*bind_group_layout_id, bind_group_layout.clone()); + false + }); + self + } + + fn triage_suspected_query_sets( + &mut self, + hub: &Hub, + trackers: &Mutex>, + ) -> &mut Self { + let mut trackers = trackers.lock(); + let resource_map = self.suspected_resources.map_mut(); + Self::triage_resources( + resource_map, + self.active.as_mut_slice(), + &mut self.free_resources, + &mut trackers.query_sets, + &hub.query_sets, + |_submit_index, _active, _id| 0, + |_query_set_id, _query_set| {}, + ); + self + } + + fn triage_suspected_staging_buffers(&mut self) -> &mut Self { + self.suspected_resources + .map_mut::>() + .retain(|staging_buffer_id, staging_buffer| { + self.free_resources + .insert(*staging_buffer_id, staging_buffer.clone()); + false + }); + self + } + /// Identify resources to free, according to `trackers` and `self.suspected_resources`. 
/// /// Given `trackers`, the [`Tracker`] belonging to same [`Device`] as @@ -515,347 +856,98 @@ impl LifetimeTracker { /// [`self.active`]: LifetimeTracker::active /// [`triage_submissions`]: LifetimeTracker::triage_submissions /// [`self.free_resources`]: LifetimeTracker::free_resources - pub(super) fn triage_suspected( + pub(crate) fn triage_suspected( &mut self, - hub: &Hub, + hub: &Hub, trackers: &Mutex>, - #[cfg(feature = "trace")] trace: Option<&Mutex>, - token: &mut Token>, + #[cfg(feature = "trace")] mut trace: Option<&mut trace::Trace>, ) { profiling::scope!("triage_suspected"); - if !self.suspected_resources.render_bundles.is_empty() { - let (mut guard, _) = hub.render_bundles.write(token); - let mut trackers = trackers.lock(); - - while let Some(id) = self.suspected_resources.render_bundles.pop() { - if trackers.bundles.remove_abandoned(id) { - log::debug!("Bundle {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyRenderBundle(id.0)); - } - - if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) { - self.suspected_resources.add_render_bundle_scope(&res.used); - } - } - } - } - - if !self.suspected_resources.bind_groups.is_empty() { - let (mut guard, _) = hub.bind_groups.write(token); - let mut trackers = trackers.lock(); - - while let Some(id) = self.suspected_resources.bind_groups.pop() { - if trackers.bind_groups.remove_abandoned(id) { - log::debug!("Bind group {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBindGroup(id.0)); - } - - if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) { - self.suspected_resources.add_bind_group_states(&res.used); - - self.suspected_resources - .bind_group_layouts - .push(res.layout_id); - - let submit_index = res.life_guard.life_count(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .bind_groups - .push(res.raw); - } - } - } - } - - if !self.suspected_resources.texture_views.is_empty() { - let (mut guard, _) = hub.texture_views.write(token); - let mut trackers = trackers.lock(); - - let mut list = mem::take(&mut self.suspected_resources.texture_views); - for id in list.drain(..) { - if trackers.views.remove_abandoned(id) { - log::debug!("Texture view {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTextureView(id.0)); - } - - if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) { - self.suspected_resources.textures.push(res.parent_id.value); - let submit_index = res.life_guard.life_count(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .texture_views - .push(res.raw); - } - } - } - self.suspected_resources.texture_views = list; - } - - if !self.suspected_resources.textures.is_empty() { - let (mut guard, _) = hub.textures.write(token); - let mut trackers = trackers.lock(); - - for id in self.suspected_resources.textures.drain(..) 
{ - if trackers.textures.remove_abandoned(id) { - log::debug!("Texture {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyTexture(id.0)); - } - - if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - let raw = match res.inner { - resource::TextureInner::Native { raw: Some(raw) } => raw, - _ => continue, - }; - let non_referenced_resources = self - .active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources); - - non_referenced_resources.textures.push(raw); - if let resource::TextureClearMode::RenderPass { clear_views, .. } = - res.clear_mode - { - non_referenced_resources - .texture_views - .extend(clear_views.into_iter()); - } - } - } - } - } - - if !self.suspected_resources.samplers.is_empty() { - let (mut guard, _) = hub.samplers.write(token); - let mut trackers = trackers.lock(); - - for id in self.suspected_resources.samplers.drain(..) { - if trackers.samplers.remove_abandoned(id) { - log::debug!("Sampler {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroySampler(id.0)); - } - - if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .samplers - .push(res.raw); - } - } - } - } - - if !self.suspected_resources.buffers.is_empty() { - let (mut guard, _) = hub.buffers.write(token); - let mut trackers = trackers.lock(); - - for id in self.suspected_resources.buffers.drain(..) { - if trackers.buffers.remove_abandoned(id) { - log::debug!("Buffer {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyBuffer(id.0)); - } - - if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - if let resource::BufferMapState::Init { stage_buffer, .. } = res.map_state { - self.free_resources.buffers.push(stage_buffer); - } - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .buffers - .extend(res.raw); - } - } - } - } - - if !self.suspected_resources.compute_pipelines.is_empty() { - let (mut guard, _) = hub.compute_pipelines.write(token); - let mut trackers = trackers.lock(); - - for id in self.suspected_resources.compute_pipelines.drain(..) { - if trackers.compute_pipelines.remove_abandoned(id) { - log::debug!("Compute pipeline {:?} will be destroyed", id); - #[cfg(feature = "trace")] - if let Some(t) = trace { - t.lock().add(trace::Action::DestroyComputePipeline(id.0)); - } - - if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) { - let submit_index = res.life_guard.life_count(); - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.free_resources, |a| &mut a.last_resources) - .compute_pipes - .push(res.raw); - } - } - } - } - - if !self.suspected_resources.render_pipelines.is_empty() { - let (mut guard, _) = hub.render_pipelines.write(token); - let mut trackers = trackers.lock(); - - for id in self.suspected_resources.render_pipelines.drain(..) 
{
-                if trackers.render_pipelines.remove_abandoned(id) {
-                    log::debug!("Render pipeline {:?} will be destroyed", id);
-                    #[cfg(feature = "trace")]
-                    if let Some(t) = trace {
-                        t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
-                    }
-
-                    if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
-                        let submit_index = res.life_guard.life_count();
-                        self.active
-                            .iter_mut()
-                            .find(|a| a.index == submit_index)
-                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
-                            .render_pipes
-                            .push(res.raw);
-                    }
-                }
-            }
-        }
-
-        if !self.suspected_resources.pipeline_layouts.is_empty() {
-            let (mut guard, _) = hub.pipeline_layouts.write(token);
-
-            for Stored {
-                value: id,
-                ref_count,
-            } in self.suspected_resources.pipeline_layouts.drain(..)
-            {
-                //Note: this has to happen after all the suspected pipelines are destroyed
-                if ref_count.load() == 1 {
-                    log::debug!("Pipeline layout {:?} will be destroyed", id);
-                    #[cfg(feature = "trace")]
-                    if let Some(t) = trace {
-                        t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
-                    }
-
-                    if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
-                        self.suspected_resources
-                            .bind_group_layouts
-                            .extend_from_slice(&lay.bind_group_layout_ids);
-                        self.free_resources.pipeline_layouts.push(lay.raw);
-                    }
-                }
-            }
-        }
-
-        if !self.suspected_resources.bind_group_layouts.is_empty() {
-            let (mut guard, _) = hub.bind_group_layouts.write(token);
-
-            for id in self.suspected_resources.bind_group_layouts.drain(..) {
-                //Note: this has to happen after all the suspected pipelines are destroyed
-                //Note: nothing else can bump the refcount since the guard is locked exclusively
-                //Note: same BGL can appear multiple times in the list, but only the last
-                // encounter could drop the refcount to 0.
-                let mut bgl_to_check = Some(id);
-                while let Some(id) = bgl_to_check.take() {
-                    let bgl = &guard[id];
-                    if bgl.multi_ref_count.dec_and_check_empty() {
-                        // If This layout points to a compatible one, go over the latter
-                        // to decrement the ref count and potentially destroy it.
-                        bgl_to_check = bgl.as_duplicate();
-
-                        log::debug!("Bind group layout {:?} will be destroyed", id);
-                        #[cfg(feature = "trace")]
-                        if let Some(t) = trace {
-                            t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
-                        }
-                        if let Some(lay) =
-                            hub.bind_group_layouts.unregister_locked(id.0, &mut *guard)
-                        {
-                            if let Some(inner) = lay.into_inner() {
-                                self.free_resources.bind_group_layouts.push(inner.raw);
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        if !self.suspected_resources.query_sets.is_empty() {
-            let (mut guard, _) = hub.query_sets.write(token);
-            let mut trackers = trackers.lock();
-
-            for id in self.suspected_resources.query_sets.drain(..) {
-                if trackers.query_sets.remove_abandoned(id) {
-                    log::debug!("Query set {:?} will be destroyed", id);
-                    // #[cfg(feature = "trace")]
-                    // trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
-                    if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) {
-                        let submit_index = res.life_guard.life_count();
-                        self.active
-                            .iter_mut()
-                            .find(|a| a.index == submit_index)
-                            .map_or(&mut self.free_resources, |a| &mut a.last_resources)
-                            .query_sets
-                            .push(res.raw);
-                    }
-                }
-            }
-        }
+        //NOTE: the order is important to release resources that depend on each other!
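// Illustration only (a minimal std-only sketch with hypothetical names, not
// part of wgpu-core): why holders must be triaged before the resources they
// keep alive. An `Arc` only becomes releasable once every holder has dropped
// its clone, so pipelines go first, then layouts.
fn _triage_order_sketch() {
    use std::sync::Arc;
    struct Layout;
    struct Pipeline {
        #[allow(dead_code)]
        layout: Arc<Layout>,
    }
    let layout = Arc::new(Layout);
    let pipeline = Pipeline {
        layout: Arc::clone(&layout),
    };
    // Triaging the layout first would see strong_count == 2 and keep it alive.
    assert_eq!(Arc::strong_count(&layout), 2);
    drop(pipeline); // triage the pipeline first...
    assert_eq!(Arc::strong_count(&layout), 1); // ...now the layout can be freed
}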
+ self.triage_suspected_render_bundles( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_compute_pipelines( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_render_pipelines( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_bind_groups( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_pipeline_layouts( + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_bind_group_layouts( + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_query_sets(hub, trackers); + self.triage_suspected_samplers( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_staging_buffers(); + self.triage_suspected_texture_views( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_textures( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); + self.triage_suspected_buffers( + hub, + trackers, + #[cfg(feature = "trace")] + &mut trace, + ); } /// Determine which buffers are ready to map, and which must wait for the /// GPU. /// /// See the documentation for [`LifetimeTracker`] for details. - pub(super) fn triage_mapped( - &mut self, - hub: &Hub, - token: &mut Token>, - ) { + pub(crate) fn triage_mapped(&mut self) { if self.mapped.is_empty() { return; } - let (buffer_guard, _) = hub.buffers.read(token); - - for stored in self.mapped.drain(..) { - let resource_id = stored.value; - // The buffer may have been destroyed since the map request. - if let Ok(buf) = buffer_guard.get(resource_id.0) { - let submit_index = buf.life_guard.life_count(); - log::trace!( - "Mapping of {:?} at submission {:?} gets assigned to active {:?}", - resource_id, - submit_index, - self.active.iter().position(|a| a.index == submit_index) - ); - - self.active - .iter_mut() - .find(|a| a.index == submit_index) - .map_or(&mut self.ready_to_map, |a| &mut a.mapped) - .push(resource_id); - } + + for buffer in self.mapped.drain(..) { + let submit_index = buffer.info.submission_index(); + log::trace!( + "Mapping of {:?} at submission {:?} gets assigned to active {:?}", + buffer.info.id(), + submit_index, + self.active.iter().position(|a| a.index == submit_index) + ); + + self.active + .iter_mut() + .find(|a| a.index == submit_index) + .map_or(&mut self.ready_to_map, |a| &mut a.mapped) + .push(buffer); } } @@ -865,41 +957,33 @@ impl LifetimeTracker { /// /// See the documentation for [`LifetimeTracker`] for details. #[must_use] - pub(super) fn handle_mapping( + pub(crate) fn handle_mapping( &mut self, - hub: &Hub, + hub: &Hub, raw: &A::Device, trackers: &Mutex>, - token: &mut Token>, ) -> Vec { if self.ready_to_map.is_empty() { return Vec::new(); } - let (mut buffer_guard, _) = hub.buffers.write(token); let mut pending_callbacks: Vec = Vec::with_capacity(self.ready_to_map.len()); - let mut trackers = trackers.lock(); - for buffer_id in self.ready_to_map.drain(..) { - let buffer = match buffer_guard.get_occupied_or_destroyed_mut(buffer_id.0) { - Ok(buf) => buf, - Err(..) => { - // The buffer may have been destroyed since the map request. - continue; - } + + for buffer in self.ready_to_map.drain(..) 
{ + let buffer_id = buffer.info.id(); + let is_removed = { + let mut trackers = trackers.lock(); + let mut count = 1; + count += hub.buffers.contains(buffer_id) as usize; + trackers.buffers.remove_abandoned(buffer_id, count) }; - if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id) - { - buffer.map_state = resource::BufferMapState::Idle; - log::debug!("Mapping request is dropped because the buffer is destroyed."); - if let Some(buf) = hub - .buffers - .unregister_locked(buffer_id.0, &mut *buffer_guard) - { - self.free_resources.buffers.extend(buf.raw); - } + if is_removed { + *buffer.map_state.lock() = resource::BufferMapState::Idle; + log::info!("Buffer ready to map {:?} is not tracked anymore", buffer_id); + self.free_resources.insert(buffer_id, buffer.clone()); } else { let mapping = match std::mem::replace( - &mut buffer.map_state, + &mut *buffer.map_state.lock(), resource::BufferMapState::Idle, ) { resource::BufferMapState::Waiting(pending_mapping) => pending_mapping, @@ -908,7 +992,7 @@ impl LifetimeTracker { // Mapping queued at least twice by map -> unmap -> map // and was already successfully mapped below active @ resource::BufferMapState::Active { .. } => { - buffer.map_state = active; + *buffer.map_state.lock() = active; continue; } _ => panic!("No pending mapping."), @@ -917,9 +1001,9 @@ impl LifetimeTracker { log::debug!("Buffer {:?} map state -> Active", buffer_id); let host = mapping.op.host; let size = mapping.range.end - mapping.range.start; - match super::map_buffer(raw, buffer, mapping.range.start, size, host) { + match super::map_buffer(raw, &buffer, mapping.range.start, size, host) { Ok(ptr) => { - buffer.map_state = resource::BufferMapState::Active { + *buffer.map_state.lock() = resource::BufferMapState::Active { ptr, range: mapping.range.start..mapping.range.start + size, host, @@ -932,7 +1016,7 @@ impl LifetimeTracker { } } } else { - buffer.map_state = resource::BufferMapState::Active { + *buffer.map_state.lock() = resource::BufferMapState::Active { ptr: std::ptr::NonNull::dangling(), range: mapping.range, host: mapping.op.host, diff --git a/third_party/rust/wgpu-core/src/device/mod.rs b/third_party/rust/wgpu-core/src/device/mod.rs index 1d89d54796eb0..269c8304af823 100644 --- a/third_party/rust/wgpu-core/src/device/mod.rs +++ b/third_party/rust/wgpu-core/src/device/mod.rs @@ -2,7 +2,7 @@ use crate::{ binding_model, hal_api::HalApi, hub::Hub, - id, + id::{self}, identity::{GlobalIdentityHandlerFactory, Input}, resource::{Buffer, BufferAccessResult}, resource::{BufferAccessError, BufferMapOperation}, @@ -18,6 +18,7 @@ use wgt::{BufferAddress, DeviceLostReason, TextureFormat}; use std::{iter, num::NonZeroU32, ptr}; +pub mod any_device; pub mod global; mod life; pub mod queue; @@ -187,8 +188,10 @@ impl UserClosures { // Mappings _must_ be fired before submissions, as the spec requires all mapping callbacks that are registered before // a on_submitted_work_done callback to be fired before the on_submitted_work_done callback. 
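// A std-only sketch (hypothetical names) of the ordering the comment above
// requires: every queued mapping callback runs before any queued
// submitted-work-done callback, simply because the lists drain in that order.
fn _fire_order_sketch() {
    let mappings: Vec<Box<dyn FnOnce()>> = vec![Box::new(|| println!("buffer mapped"))];
    let submissions: Vec<Box<dyn FnOnce()>> = vec![Box::new(|| println!("work done"))];
    for callback in mappings {
        callback(); // mapping callbacks fire first, as the spec requires
    }
    for callback in submissions {
        callback();
    }
}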
- for (operation, status) in self.mappings { - operation.callback.call(status); + for (mut operation, status) in self.mappings { + if let Some(callback) = operation.callback.take() { + callback.call(status); + } } for closure in self.submissions { closure.call(); @@ -288,24 +291,21 @@ impl DeviceLostClosure { } } -fn map_buffer( +fn map_buffer( raw: &A::Device, - buffer: &mut Buffer, + buffer: &Buffer, offset: BufferAddress, size: BufferAddress, kind: HostMap, ) -> Result, BufferAccessError> { let mapping = unsafe { - raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size) + raw.map_buffer(buffer.raw(), offset..offset + size) .map_err(DeviceError::from)? }; - buffer.sync_mapped_writes = match kind { + *buffer.sync_mapped_writes.lock() = match kind { HostMap::Read if !mapping.is_coherent => unsafe { - raw.invalidate_mapped_ranges( - buffer.raw.as_ref().unwrap(), - iter::once(offset..offset + size), - ); + raw.invalidate_mapped_ranges(buffer.raw(), iter::once(offset..offset + size)); None }, HostMap::Write if !mapping.is_coherent => Some(offset..offset + size), @@ -329,10 +329,15 @@ fn map_buffer( // reasonable way as all data is pushed to GPU anyways. // No need to flush if it is flushed later anyways. - let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none(); + let zero_init_needs_flush_now = + mapping.is_coherent && buffer.sync_mapped_writes.lock().is_none(); let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) }; - for uninitialized in buffer.initialization_status.drain(offset..(size + offset)) { + for uninitialized in buffer + .initialization_status + .write() + .drain(offset..(size + offset)) + { // The mapping's pointer is already offset, however we track the // uninitialized range relative to the buffer's start. 
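// A tiny std-only sketch of that coordinate shift (hypothetical values): the
// uninitialized range is tracked in buffer coordinates, while the mapped
// slice starts at `offset`, so the range is rebased before filling, as the
// statement below does.
fn _fill_range_sketch() {
    let offset: u64 = 256; // where the mapping starts, in buffer coordinates
    let uninitialized = 256u64..320u64; // tracked relative to the buffer start
    let fill_range =
        (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
    assert_eq!(fill_range, 0..64); // relative to the already-offset mapping pointer
}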
let fill_range = @@ -340,20 +345,18 @@ fn map_buffer( mapped[fill_range].fill(0); if zero_init_needs_flush_now { - unsafe { - raw.flush_mapped_ranges(buffer.raw.as_ref().unwrap(), iter::once(uninitialized)) - }; + unsafe { raw.flush_mapped_ranges(buffer.raw(), iter::once(uninitialized)) }; } } Ok(mapping.ptr) } -struct CommandAllocator { +pub(crate) struct CommandAllocator { free_encoders: Vec, } -impl CommandAllocator { +impl CommandAllocator { fn acquire_encoder( &mut self, device: &A::Device, @@ -387,6 +390,7 @@ impl CommandAllocator { pub struct InvalidDevice; #[derive(Clone, Debug, Error)] +#[non_exhaustive] pub enum DeviceError { #[error("Parent device is invalid.")] Invalid, @@ -396,6 +400,8 @@ pub enum DeviceError { OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, + #[error("QueueId is invalid")] + InvalidQueueId, #[error("Attempt to use a resource with a different device from the one that created it")] WrongDevice, } @@ -435,13 +441,13 @@ pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> { } impl ImplicitPipelineIds<'_, G> { - fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { + fn prepare(self, hub: &Hub) -> ImplicitPipelineContext { ImplicitPipelineContext { - root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(), + root_id: hub.pipeline_layouts.prepare::(self.root_id).into_id(), group_ids: self .group_ids .iter() - .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id()) + .map(|id_in| hub.bind_group_layouts.prepare::(*id_in).into_id()) .collect(), } } diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs index bac33e785c2e7..3902d6190c9b2 100644 --- a/third_party/rust/wgpu-core/src/device/queue.rs +++ b/third_party/rust/wgpu-core/src/device/queue.rs @@ -6,25 +6,57 @@ use crate::{ ClearError, CommandBuffer, CopySide, ImageCopyTexture, TransferError, }, conv, - device::{DeviceError, WaitIdleError}, + device::{life::ResourceMaps, DeviceError, WaitIdleError}, get_lowest_common_denom, global::Global, hal_api::HalApi, hal_label, - hub::Token, - id, + id::{self, QueueId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange}, - resource::{BufferAccessError, BufferMapState, StagingBuffer, TextureInner}, - track, FastHashSet, SubmissionIndex, + resource::{ + Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, ResourceType, + StagingBuffer, Texture, TextureInner, + }, + track, FastHashMap, SubmissionIndex, }; use hal::{CommandEncoder as _, Device as _, Queue as _}; use parking_lot::Mutex; -use smallvec::SmallVec; -use std::{iter, mem, ptr}; + +use std::{ + iter, mem, ptr, + sync::{atomic::Ordering, Arc}, +}; use thiserror::Error; +use super::Device; + +pub struct Queue { + pub device: Option>>, + pub raw: Option, + pub info: ResourceInfo, +} + +impl Resource for Queue { + const TYPE: ResourceType = "Queue"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } +} + +impl Drop for Queue { + fn drop(&mut self) { + let queue = self.raw.take().unwrap(); + self.device.as_ref().unwrap().release_queue(queue); + } +} + /// Number of command buffers that we generate from the same pool /// for the write_xxx commands, before the pool is recycled. 
///
@@ -109,7 +141,7 @@ impl SubmittedWorkDoneClosure {
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct WrappedSubmissionIndex {
-    pub queue_id: id::QueueId,
+    pub queue_id: QueueId,
     pub index: SubmissionIndex,
 }
@@ -127,19 +159,20 @@ pub struct WrappedSubmissionIndex {
 /// - `LifetimeTracker::free_resources`: resources to be freed in the next
 ///   `maintain` call, no longer used anywhere
 #[derive(Debug)]
-pub enum TempResource<A: hal::Api> {
-    Buffer(A::Buffer),
-    Texture(A::Texture, SmallVec<[A::TextureView; 1]>),
+pub enum TempResource<A: HalApi> {
+    Buffer(Arc<Buffer<A>>),
+    StagingBuffer(Arc<StagingBuffer<A>>),
+    Texture(Arc<Texture<A>>),
 }
 
 /// A queue execution for a particular command encoder.
-pub(super) struct EncoderInFlight<A: hal::Api> {
+pub(crate) struct EncoderInFlight<A: HalApi> {
     raw: A::CommandEncoder,
     cmd_buffers: Vec<A::CommandBuffer>,
 }
 
-impl<A: hal::Api> EncoderInFlight<A> {
-    pub(super) unsafe fn land(mut self) -> A::CommandEncoder {
+impl<A: HalApi> EncoderInFlight<A> {
+    pub(crate) unsafe fn land(mut self) -> A::CommandEncoder {
         unsafe { self.raw.reset_all(self.cmd_buffers.into_iter()) };
         self.raw
     }
@@ -160,25 +193,29 @@ impl<A: hal::Api> EncoderInFlight<A> {
 /// time the user submits a wgpu command buffer, ahead of the user's
 /// commands.
 ///
+/// Important:
+/// When locking `pending_writes`, be sure that the trackers are not locked,
+/// and hold the tracker locks for the shortest time span possible.
+///
 /// All uses of [`StagingBuffer`]s end up here.
 #[derive(Debug)]
-pub(crate) struct PendingWrites<A: hal::Api> {
+pub(crate) struct PendingWrites<A: HalApi> {
     pub command_encoder: A::CommandEncoder,
     pub is_active: bool,
     pub temp_resources: Vec<TempResource<A>>,
-    pub dst_buffers: FastHashSet<id::BufferId>,
-    pub dst_textures: FastHashSet<id::TextureId>,
+    pub dst_buffers: FastHashMap<id::BufferId, Arc<Buffer<A>>>,
+    pub dst_textures: FastHashMap<id::TextureId, Arc<Texture<A>>>,
     pub executing_command_buffers: Vec<A::CommandBuffer>,
 }
 
-impl<A: hal::Api> PendingWrites<A> {
+impl<A: HalApi> PendingWrites<A> {
     pub fn new(command_encoder: A::CommandEncoder) -> Self {
         Self {
            command_encoder,
            is_active: false,
            temp_resources: Vec::new(),
-            dst_buffers: FastHashSet::default(),
-            dst_textures: FastHashSet::default(),
+            dst_buffers: FastHashMap::default(),
+            dst_textures: FastHashMap::default(),
            executing_command_buffers: Vec::new(),
        }
    }
@@ -193,27 +230,16 @@ impl<A: hal::Api> PendingWrites<A> {
             device.destroy_command_encoder(self.command_encoder);
         }
 
-        for resource in self.temp_resources {
-            match resource {
-                TempResource::Buffer(buffer) => unsafe {
-                    device.destroy_buffer(buffer);
-                },
-                TempResource::Texture(texture, views) => unsafe {
-                    for view in views.into_iter() {
-                        device.destroy_texture_view(view);
-                    }
-                    device.destroy_texture(texture);
-                },
-            }
-        }
+        self.temp_resources.clear();
     }
 
     pub fn consume_temp(&mut self, resource: TempResource<A>) {
         self.temp_resources.push(resource);
     }
 
-    fn consume(&mut self, buffer: StagingBuffer<A>) {
-        self.temp_resources.push(TempResource::Buffer(buffer.raw));
+    fn consume(&mut self, buffer: Arc<StagingBuffer<A>>) {
+        self.temp_resources
+            .push(TempResource::StagingBuffer(buffer));
     }
 
     #[must_use]
@@ -233,15 +259,12 @@ impl<A: hal::Api> PendingWrites<A> {
     #[must_use]
     fn post_submit(
         &mut self,
-        command_allocator: &Mutex<super::CommandAllocator<A>>,
+        command_allocator: &mut super::CommandAllocator<A>,
         device: &A::Device,
         queue: &A::Queue,
     ) -> Option<EncoderInFlight<A>> {
         if self.executing_command_buffers.len() >= WRITE_COMMAND_BUFFERS_PER_POOL {
-            let new_encoder = command_allocator
-                .lock()
-                .acquire_encoder(device, queue)
-                .unwrap();
+            let new_encoder = command_allocator.acquire_encoder(device, queue).unwrap();
             Some(EncoderInFlight {
                 raw: mem::replace(&mut self.command_encoder, new_encoder),
                 cmd_buffers: mem::take(&mut self.executing_command_buffers),
@@ -274,7 +297,7 @@ impl<A: hal::Api> PendingWrites<A> {
 }
 
 fn prepare_staging_buffer<A: HalApi>(
-    device: &mut A::Device,
+    device: &Arc<Device<A>>,
     size: wgt::BufferAddress,
     instance_flags: wgt::InstanceFlags,
 ) -> Result<(StagingBuffer<A>, *mut u8), DeviceError> {
@@ -286,24 +309,31 @@ fn prepare_staging_buffer<A: HalApi>(
         memory_flags: hal::MemoryFlags::TRANSIENT,
     };
 
-    let buffer = unsafe { device.create_buffer(&stage_desc)? };
-    let mapping = unsafe { device.map_buffer(&buffer, 0..size) }?;
+    let buffer = unsafe { device.raw().create_buffer(&stage_desc)? };
+    let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size) }?;
 
     let staging_buffer = StagingBuffer {
-        raw: buffer,
+        raw: Mutex::new(Some(buffer)),
+        device: device.clone(),
         size,
+        info: ResourceInfo::new("<StagingBuffer>"),
         is_coherent: mapping.is_coherent,
     };
 
     Ok((staging_buffer, mapping.ptr.as_ptr()))
 }
 
-impl<A: hal::Api> StagingBuffer<A> {
+impl<A: HalApi> StagingBuffer<A> {
     unsafe fn flush(&self, device: &A::Device) -> Result<(), DeviceError> {
         if !self.is_coherent {
-            unsafe { device.flush_mapped_ranges(&self.raw, iter::once(0..self.size)) };
+            unsafe {
+                device.flush_mapped_ranges(
+                    self.raw.lock().as_ref().unwrap(),
+                    iter::once(0..self.size),
+                )
+            };
         }
-        unsafe { device.unmap_buffer(&self.raw)? };
+        unsafe { device.unmap_buffer(self.raw.lock().as_ref().unwrap())? };
         Ok(())
     }
 }
@@ -349,7 +379,7 @@ pub enum QueueSubmitError {
 impl<G: GlobalIdentityHandlerFactory> Global<G> {
     pub fn queue_write_buffer<A: HalApi>(
         &self,
-        queue_id: id::QueueId,
+        queue_id: QueueId,
         buffer_id: id::BufferId,
         buffer_offset: wgt::BufferAddress,
         data: &[u8],
@@ -357,18 +387,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         profiling::scope!("Queue::write_buffer");
 
         let hub = A::hub(self);
-        let root_token = &mut Token::root();
-
-        let (mut device_guard, ref mut device_token) = hub.devices.write(root_token);
-        let device = device_guard
-            .get_mut(queue_id)
-            .map_err(|_| DeviceError::Invalid)?;
+        let queue = hub
+            .queues
+            .get(queue_id)
+            .map_err(|_| DeviceError::InvalidQueueId)?;
+
+        let device = queue.device.as_ref().unwrap();
 
         let data_size = data.len() as wgt::BufferAddress;
 
         #[cfg(feature = "trace")]
-        if let Some(ref trace) = device.trace {
-            let mut trace = trace.lock();
+        if let Some(ref mut trace) = *device.trace.lock() {
             let data_path = trace.make_binary("bin", data);
             trace.add(Action::WriteBuffer {
                 id: buffer_id,
@@ -387,124 +417,131 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         // freed, even if an error occurs. All paths from here must call
         // `device.pending_writes.consume`.
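// A std-only sketch (hypothetical types) of the "consume on every path" rule
// stated above: ownership of the staging buffer must reach `consume` on both
// the success and the error path, so it is always freed through the
// pending-writes machinery instead of leaking.
fn _consume_on_all_paths_sketch() {
    struct Staging;
    struct PendingWrites(Vec<Staging>);
    impl PendingWrites {
        fn consume(&mut self, staging: Staging) {
            self.0.push(staging); // freed later, when the queue is maintained
        }
    }
    fn flush() -> Result<(), ()> {
        Err(()) // pretend the flush failed
    }
    let mut pending_writes = PendingWrites(Vec::new());
    let staging = Staging;
    let result = flush();
    pending_writes.consume(staging); // reached on the error path too
    assert!(result.is_err());
}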
let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, data_size, device.instance_flags)?; + prepare_staging_buffer(device, data_size, device.instance_flags)?; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + + let stage_fid = hub.staging_buffers.request(); + let staging_buffer = stage_fid.init(staging_buffer); if let Err(flush_error) = unsafe { profiling::scope!("copy"); ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr, data.len()); - staging_buffer.flush(&device.raw) + staging_buffer.flush(device.raw()) } { - device.pending_writes.consume(staging_buffer); + pending_writes.consume(staging_buffer); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - queue_id, device, - device_token, + pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - device.pending_writes.consume(staging_buffer); + pending_writes.consume(staging_buffer); result } pub fn queue_create_staging_buffer( &self, - queue_id: id::QueueId, + queue_id: QueueId, buffer_size: wgt::BufferSize, id_in: Input, ) -> Result<(id::StagingBufferId, *mut u8), QueueWriteError> { profiling::scope!("Queue::create_staging_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - let (mut device_guard, ref mut device_token) = hub.devices.write(root_token); - let device = device_guard - .get_mut(queue_id) - .map_err(|_| DeviceError::Invalid)?; + let queue = hub + .queues + .get(queue_id) + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, buffer_size.get(), device.instance_flags)?; + prepare_staging_buffer(device, buffer_size.get(), device.instance_flags)?; - let fid = hub.staging_buffers.prepare(id_in); - let id = fid.assign(staging_buffer, device_token); + let fid = hub.staging_buffers.prepare::(id_in); + let (id, _) = fid.assign(staging_buffer); + log::info!("Created StagingBuffer {:?}", id); - Ok((id.0, staging_buffer_ptr)) + Ok((id, staging_buffer_ptr)) } pub fn queue_write_staging_buffer( &self, - queue_id: id::QueueId, + queue_id: QueueId, buffer_id: id::BufferId, buffer_offset: wgt::BufferAddress, staging_buffer_id: id::StagingBufferId, ) -> Result<(), QueueWriteError> { profiling::scope!("Queue::write_staging_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - let (mut device_guard, ref mut device_token) = hub.devices.write(root_token); - let device = device_guard - .get_mut(queue_id) - .map_err(|_| DeviceError::Invalid)?; + let queue = hub + .queues + .get(queue_id) + .map_err(|_| DeviceError::InvalidQueueId)?; - let staging_buffer = hub - .staging_buffers - .unregister(staging_buffer_id, device_token) - .0 - .ok_or(TransferError::InvalidBuffer(buffer_id))?; + let device = queue.device.as_ref().unwrap(); + + let staging_buffer = hub.staging_buffers.unregister(staging_buffer_id); + if staging_buffer.is_none() { + return Err(QueueWriteError::Transfer(TransferError::InvalidBuffer( + buffer_id, + ))); + } + let staging_buffer = staging_buffer.unwrap(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); // At this point, we have taken ownership of the staging_buffer from the // user. Platform validation requires that the staging buffer always // be freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. 
- if let Err(flush_error) = unsafe { staging_buffer.flush(&device.raw) } { - device.pending_writes.consume(staging_buffer); + if let Err(flush_error) = unsafe { staging_buffer.flush(device.raw()) } { + pending_writes.consume(staging_buffer); return Err(flush_error.into()); } let result = self.queue_write_staging_buffer_impl( - queue_id, device, - device_token, + pending_writes, &staging_buffer, buffer_id, buffer_offset, ); - device.pending_writes.consume(staging_buffer); + pending_writes.consume(staging_buffer); result } pub fn queue_validate_write_buffer( &self, - _queue_id: id::QueueId, + _queue_id: QueueId, buffer_id: id::BufferId, buffer_offset: u64, buffer_size: u64, ) -> Result<(), QueueWriteError> { profiling::scope!("Queue::validate_write_buffer"); let hub = A::hub(self); - let root_token = &mut Token::root(); - - let (_, ref mut device_token) = hub.devices.read(root_token); - let buffer_guard = hub.buffers.read(device_token).0; - let buffer = buffer_guard + let buffer = hub + .buffers .get(buffer_id) .map_err(|_| TransferError::InvalidBuffer(buffer_id))?; - self.queue_validate_write_buffer_impl(buffer, buffer_id, buffer_offset, buffer_size)?; + self.queue_validate_write_buffer_impl(&buffer, buffer_id, buffer_offset, buffer_size)?; Ok(()) } fn queue_validate_write_buffer_impl( &self, - buffer: &crate::resource::Buffer, + buffer: &Buffer, buffer_id: id::BufferId, buffer_offset: u64, buffer_size: u64, @@ -535,62 +572,68 @@ impl Global { fn queue_write_staging_buffer_impl( &self, - device_id: id::DeviceId, - device: &mut super::Device, - device_token: &mut Token>, + device: &Device, + pending_writes: &mut PendingWrites, staging_buffer: &StagingBuffer, buffer_id: id::BufferId, buffer_offset: u64, ) -> Result<(), QueueWriteError> { let hub = A::hub(self); - let buffer_guard = hub.buffers.read(device_token).0; - - let mut trackers = device.trackers.lock(); - let (dst, transition) = trackers - .buffers - .set_single(&buffer_guard, buffer_id, hal::BufferUses::COPY_DST) - .ok_or(TransferError::InvalidBuffer(buffer_id))?; + let (dst, transition) = { + let buffer_guard = hub.buffers.read(); + let dst = buffer_guard + .get(buffer_id) + .map_err(|_| TransferError::InvalidBuffer(buffer_id))?; + let mut trackers = device.trackers.lock(); + trackers + .buffers + .set_single(dst, hal::BufferUses::COPY_DST) + .ok_or(TransferError::InvalidBuffer(buffer_id))? 
+ }; let dst_raw = dst .raw .as_ref() .ok_or(TransferError::InvalidBuffer(buffer_id))?; - if dst.device_id.value.0 != device_id { + if dst.device.as_info().id() != device.as_info().id() { return Err(DeviceError::WrongDevice.into()); } let src_buffer_size = staging_buffer.size; - self.queue_validate_write_buffer_impl(dst, buffer_id, buffer_offset, src_buffer_size)?; + self.queue_validate_write_buffer_impl(&dst, buffer_id, buffer_offset, src_buffer_size)?; - dst.life_guard.use_at(device.active_submission_index + 1); + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); let region = wgt::BufferSize::new(src_buffer_size).map(|size| hal::BufferCopy { src_offset: 0, dst_offset: buffer_offset, size, }); + let inner_buffer = staging_buffer.raw.lock(); let barriers = iter::once(hal::BufferBarrier { - buffer: &staging_buffer.raw, + buffer: inner_buffer.as_ref().unwrap(), usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, }) - .chain(transition.map(|pending| pending.into_hal(dst))); - let encoder = device.pending_writes.activate(); + .chain(transition.map(|pending| pending.into_hal(&dst))); + let encoder = pending_writes.activate(); unsafe { encoder.transition_buffers(barriers); - encoder.copy_buffer_to_buffer(&staging_buffer.raw, dst_raw, region.into_iter()); + encoder.copy_buffer_to_buffer( + inner_buffer.as_ref().unwrap(), + dst_raw, + region.into_iter(), + ); } - - device.pending_writes.dst_buffers.insert(buffer_id); + let dst = hub.buffers.get(buffer_id).unwrap(); + pending_writes.dst_buffers.insert(buffer_id, dst.clone()); // Ensure the overwritten bytes are marked as initialized so // they don't need to be nulled prior to mapping or binding. { - drop(buffer_guard); - let mut buffer_guard = hub.buffers.write(device_token).0; - - let dst = buffer_guard.get_mut(buffer_id).unwrap(); dst.initialization_status + .write() .drain(buffer_offset..(buffer_offset + src_buffer_size)); } @@ -599,7 +642,7 @@ impl Global { pub fn queue_write_texture( &self, - queue_id: id::QueueId, + queue_id: QueueId, destination: &ImageCopyTexture, data: &[u8], data_layout: &wgt::ImageDataLayout, @@ -608,15 +651,16 @@ impl Global { profiling::scope!("Queue::write_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) - .map_err(|_| DeviceError::Invalid)?; + + let queue = hub + .queues + .get(queue_id) + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - let mut trace = trace.lock(); + if let Some(ref mut trace) = *device.trace.lock() { let data_path = trace.make_binary("bin", data); trace.add(Action::WriteTexture { to: *destination, @@ -631,12 +675,12 @@ impl Global { return Ok(()); } - let (mut texture_guard, _) = hub.textures.write(&mut token); // For clear we need write access to the texture. TODO: Can we acquire write lock later? 
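// For contrast, a std-only sketch (hypothetical names) of the arcanized
// replacement for such registry-wide write locks: per-field interior
// mutability, so one texture's init state can be mutated while other threads
// keep reading the registry.
fn _interior_mutability_sketch() {
    use std::sync::{Arc, RwLock};
    struct Texture {
        initialization_status: RwLock<Vec<bool>>,
    }
    let dst = Arc::new(Texture {
        initialization_status: RwLock::new(vec![false; 4]),
    });
    // No `&mut Texture` (and no exclusive registry guard) is needed:
    dst.initialization_status.write().unwrap()[0] = true;
    assert!(dst.initialization_status.read().unwrap()[0]);
}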
- let dst = texture_guard - .get_mut(destination.texture) + let dst = hub + .textures + .get(destination.texture) .map_err(|_| TransferError::InvalidTexture(destination.texture))?; - if dst.device_id.value.0 != queue_id { + if dst.device.as_info().id() != queue_id { return Err(DeviceError::WrongDevice.into()); } @@ -651,7 +695,7 @@ impl Global { let (hal_copy_size, array_layer_count) = validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; - let (selector, dst_base) = extract_texture_selector(destination, size, dst)?; + let (selector, dst_base) = extract_texture_selector(destination, size, &dst)?; if !dst_base.aspect.is_one() { return Err(TransferError::CopyAspectNotOne.into()); @@ -708,8 +752,9 @@ impl Global { (size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks; let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; - let mut trackers = device.trackers.lock(); - let encoder = device.pending_writes.activate(); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); // If the copy does not fully cover the layers, we need to initialize to // zero *first* as we don't keep track of partial texture layer inits. @@ -722,18 +767,19 @@ impl Global { } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; - if dst.initialization_status.mips[destination.mip_level as usize] + let mut dst_initialization_status = dst.initialization_status.write(); + if dst_initialization_status.mips[destination.mip_level as usize] .check(init_layer_range.clone()) .is_some() { if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) { - for layer_range in dst.initialization_status.mips[destination.mip_level as usize] + for layer_range in dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range) .collect::>>() { + let mut trackers = device.trackers.lock(); crate::command::clear_texture( - &*texture_guard, - id::Valid(destination.texture), + &dst, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, @@ -741,35 +787,28 @@ impl Global { encoder, &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .map_err(QueueWriteError::from)?; } } else { - dst.initialization_status.mips[destination.mip_level as usize] + dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range); } } // Re-get `dst` immutably here, so that the mutable borrow of the - // `texture_guard.get_mut` above ends in time for the `clear_texture` + // `texture_guard.get` above ends in time for the `clear_texture` // call above. Since we've held `texture_guard` the whole time, we know // the texture hasn't gone away in the mean time, so we can unwrap. 
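// The core arcanization pattern behind this change, as a std-only sketch
// (hypothetical names): `get` hands out a clone of an `Arc` rather than a
// borrow tied to a registry guard, so the handle stays valid after the lock
// is released.
fn _arc_handle_sketch() {
    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    struct Texture;
    let registry: RwLock<HashMap<u32, Arc<Texture>>> = RwLock::new(HashMap::new());
    registry.write().unwrap().insert(7, Arc::new(Texture));
    let dst: Arc<Texture> = registry.read().unwrap().get(&7).unwrap().clone();
    // The registry (and its guard) can go away; `dst` stays usable.
    drop(registry);
    let _still_usable = dst;
}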
- let dst = texture_guard.get(destination.texture).unwrap(); - let transition = trackers - .textures - .set_single( - dst, - destination.texture, - selector, - hal::TextureUses::COPY_DST, - ) - .ok_or(TransferError::InvalidTexture(destination.texture))?; - - dst.life_guard.use_at(device.active_submission_index + 1); + let dst = hub.textures.get(destination.texture).unwrap(); + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); - let dst_raw = dst - .inner + let dst_inner = dst.inner(); + let dst_raw = dst_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; @@ -781,7 +820,10 @@ impl Global { // freed, even if an error occurs. All paths from here must call // `device.pending_writes.consume`. let (staging_buffer, staging_buffer_ptr) = - prepare_staging_buffer(&mut device.raw, stage_size, device.instance_flags)?; + prepare_staging_buffer(device, stage_size, device.instance_flags)?; + + let stage_fid = hub.staging_buffers.request(); + let staging_buffer = stage_fid.init(staging_buffer); if stage_bytes_per_row == bytes_per_row { profiling::scope!("copy aligned"); @@ -816,8 +858,8 @@ impl Global { } } - if let Err(e) = unsafe { staging_buffer.flush(&device.raw) } { - device.pending_writes.consume(staging_buffer); + if let Err(e) = unsafe { staging_buffer.flush(device.raw()) } { + pending_writes.consume(staging_buffer); return Err(e.into()); } @@ -836,22 +878,32 @@ impl Global { size: hal_copy_size, } }); - let barrier = hal::BufferBarrier { - buffer: &staging_buffer.raw, - usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, - }; - unsafe { - encoder.transition_textures(transition.map(|pending| pending.into_hal(dst))); - encoder.transition_buffers(iter::once(barrier)); - encoder.copy_buffer_to_texture(&staging_buffer.raw, dst_raw, regions); + { + let inner_buffer = staging_buffer.raw.lock(); + let barrier = hal::BufferBarrier { + buffer: inner_buffer.as_ref().unwrap(), + usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, + }; + + let mut trackers = device.trackers.lock(); + let transition = trackers + .textures + .set_single(&dst, selector, hal::TextureUses::COPY_DST) + .ok_or(TransferError::InvalidTexture(destination.texture))?; + unsafe { + encoder.transition_textures( + transition.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())), + ); + encoder.transition_buffers(iter::once(barrier)); + encoder.copy_buffer_to_texture(inner_buffer.as_ref().unwrap(), dst_raw, regions); + } } - device.pending_writes.consume(staging_buffer); - device - .pending_writes + pending_writes.consume(staging_buffer); + pending_writes .dst_textures - .insert(destination.texture); + .insert(destination.texture, dst.clone()); Ok(()) } @@ -859,7 +911,7 @@ impl Global { #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] pub fn queue_copy_external_image_to_texture( &self, - queue_id: id::QueueId, + queue_id: QueueId, source: &wgt::ImageCopyExternalImage, destination: crate::command::ImageCopyTextureTagged, size: wgt::Extent3d, @@ -867,11 +919,13 @@ impl Global { profiling::scope!("Queue::copy_external_image_to_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) - .map_err(|_| DeviceError::Invalid)?; + + let queue = hub + .queues + .get(queue_id) + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); if size.width == 0 || size.height 
== 0 || size.depth_or_array_layers == 0 { log::trace!("Ignoring write_texture of size 0"); @@ -897,8 +951,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let (mut texture_guard, _) = hub.textures.write(&mut token); // For clear we need write access to the texture. TODO: Can we acquire write lock later? - let dst = texture_guard.get_mut(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).unwrap(); if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { return Err( @@ -970,10 +1023,10 @@ impl Global { )?; let (selector, dst_base) = - extract_texture_selector(&destination.to_untagged(), &size, dst)?; + extract_texture_selector(&destination.to_untagged(), &size, &dst)?; - let mut trackers = device.trackers.lock(); - let encoder = device.pending_writes.activate(); + let mut pending_writes = device.pending_writes.lock(); + let encoder = pending_writes.as_mut().unwrap().activate(); // If the copy does not fully cover the layers, we need to initialize to // zero *first* as we don't keep track of partial texture layer inits. @@ -986,18 +1039,19 @@ impl Global { } else { destination.origin.z..destination.origin.z + size.depth_or_array_layers }; - if dst.initialization_status.mips[destination.mip_level as usize] + let mut dst_initialization_status = dst.initialization_status.write(); + if dst_initialization_status.mips[destination.mip_level as usize] .check(init_layer_range.clone()) .is_some() { if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) { - for layer_range in dst.initialization_status.mips[destination.mip_level as usize] + for layer_range in dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range) .collect::>>() { + let mut trackers = device.trackers.lock(); crate::command::clear_texture( - &*texture_guard, - id::Valid(destination.texture), + &dst, TextureInitRange { mip_range: destination.mip_level..(destination.mip_level + 1), layer_range, @@ -1005,32 +1059,22 @@ impl Global { encoder, &mut trackers.textures, &device.alignments, - &device.zero_buffer, + device.zero_buffer.as_ref().unwrap(), ) .map_err(QueueWriteError::from)?; } } else { - dst.initialization_status.mips[destination.mip_level as usize] + dst_initialization_status.mips[destination.mip_level as usize] .drain(init_layer_range); } } + dst.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); - let dst = texture_guard.get(destination.texture).unwrap(); - - let transitions = trackers - .textures - .set_single( - dst, - destination.texture, - selector, - hal::TextureUses::COPY_DST, - ) - .ok_or(TransferError::InvalidTexture(destination.texture))?; - - dst.life_guard.use_at(device.active_submission_index + 1); - - let dst_raw = dst - .inner + let dst_inner = dst.inner(); + let dst_raw = dst_inner + .as_ref() + .unwrap() .as_raw() .ok_or(TransferError::InvalidTexture(destination.texture))?; @@ -1046,7 +1090,14 @@ impl Global { }; unsafe { - encoder.transition_textures(transitions.map(|pending| pending.into_hal(dst))); + let mut trackers = device.trackers.lock(); + let transitions = trackers + .textures + .set_single(&dst, selector, hal::TextureUses::COPY_DST) + .ok_or(TransferError::InvalidTexture(destination.texture))?; + encoder.transition_textures( + transitions.map(|pending| pending.into_hal(dst_inner.as_ref().unwrap())), + ); encoder.copy_external_image_to_texture( source, dst_raw, @@ -1060,7 +1111,7 @@ impl Global { pub fn queue_submit( &self, 
- queue_id: id::QueueId, + queue_id: QueueId, command_buffer_ids: &[id::CommandBufferId], ) -> Result { profiling::scope!("Queue::submit"); @@ -1068,47 +1119,44 @@ impl Global { let (submit_index, callbacks) = { let hub = A::hub(self); - let mut token = Token::root(); - - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let device = device_guard - .get_mut(queue_id) - .map_err(|_| DeviceError::Invalid)?; - device.temp_suspected.clear(); - device.active_submission_index += 1; - let submit_index = device.active_submission_index; + + let queue = hub + .queues + .get(queue_id) + .map_err(|_| DeviceError::InvalidQueueId)?; + + let device = queue.device.as_ref().unwrap(); + + let mut fence = device.fence.write(); + let fence = fence.as_mut().unwrap(); + let submit_index = device + .active_submission_index + .fetch_add(1, Ordering::Relaxed) + + 1; let mut active_executions = Vec::new(); let mut used_surface_textures = track::TextureUsageScope::new(); { - let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token); + let mut command_buffer_guard = hub.command_buffers.write(); if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); - let (render_bundle_guard, mut token) = hub.render_bundles.read(&mut token); - let (_, mut token) = hub.pipeline_layouts.read(&mut token); - let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); - let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); - let (mut buffer_guard, mut token) = hub.buffers.write(&mut token); - let (mut texture_guard, mut token) = hub.textures.write(&mut token); - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, mut token) = hub.samplers.read(&mut token); - let (query_set_guard, _) = hub.query_sets.read(&mut token); - - //Note: locking the trackers has to be done after the storages - let mut trackers = device.trackers.lock(); - //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. + let mut temp_suspected = device.temp_suspected.lock(); + { + let mut suspected = + temp_suspected.replace(ResourceMaps::new::()).unwrap(); + suspected.clear(); + } // finish all the command buffers first for &cmb_id in command_buffer_ids { // we reset the used surface textures every time we use // it, so make sure to set_size on it. - used_surface_textures.set_size(texture_guard.len()); + used_surface_textures.set_size(hub.textures.read().len()); // TODO: ideally we would use `get_and_mark_destroyed` but the code here // wants to consume the command buffer. 
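// A std-only sketch of the atomic submission-index handshake used in this
// function (hypothetical names): writers record `load(...) + 1` as the
// submission that will flush them, and `queue_submit` later claims exactly
// that index with `fetch_add`.
fn _submission_index_sketch() {
    use std::sync::atomic::{AtomicU64, Ordering};
    let active_submission_index = AtomicU64::new(0);
    // A pending write is tagged with the *next* submission:
    let used_at = active_submission_index.load(Ordering::Relaxed) + 1;
    // queue_submit then claims that index:
    let submit_index = active_submission_index.fetch_add(1, Ordering::Relaxed) + 1;
    assert_eq!(used_at, submit_index);
}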
@@ -1118,142 +1166,173 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
                         Err(_) => continue,
                     };
 
-                    if cmdbuf.device_id.value.0 != queue_id {
+                    if cmdbuf.device.as_info().id() != queue_id {
                         return Err(DeviceError::WrongDevice.into());
                     }
 
                     #[cfg(feature = "trace")]
-                    if let Some(ref trace) = device.trace {
-                        trace.lock().add(Action::Submit(
+                    if let Some(ref mut trace) = *device.trace.lock() {
+                        trace.add(Action::Submit(
                             submit_index,
-                            cmdbuf.commands.take().unwrap(),
+                            cmdbuf
+                                .data
+                                .lock()
+                                .as_mut()
+                                .unwrap()
+                                .commands
+                                .take()
+                                .unwrap(),
                         ));
                     }
 
                     if !cmdbuf.is_finished() {
-                        device.destroy_command_buffer(cmdbuf);
+                        if let Ok(cmdbuf) = Arc::try_unwrap(cmdbuf) {
+                            device.destroy_command_buffer(cmdbuf);
+                        } else {
+                            panic!(
+                                "Command buffer cannot be destroyed because it is still in use"
+                            );
+                        }
                         continue;
                     }
 
                     // optimize the tracked states
                     // cmdbuf.trackers.optimize();
-
-                    // update submission IDs
-                    for id in cmdbuf.trackers.buffers.used() {
-                        let buffer = match buffer_guard.get(id.0) {
-                            Ok(buf) => buf,
-                            Err(..) => {
-                                return Err(QueueSubmitError::DestroyedBuffer(id.0));
-                            }
-                        };
-                        // get fails if the buffer is invalid or destroyed so we can assume
-                        // the raw buffer is not None.
-                        let raw_buf = buffer.raw.as_ref().unwrap();
-
-                        if !buffer.life_guard.use_at(submit_index) {
-                            if let BufferMapState::Active { .. } = buffer.map_state {
-                                log::warn!("Dropped buffer has a pending mapping.");
-                                unsafe { device.raw.unmap_buffer(raw_buf) }
-                                    .map_err(DeviceError::from)?;
-                            }
-                            device.temp_suspected.buffers.push(id);
-                        } else {
-                            match buffer.map_state {
-                                BufferMapState::Idle => (),
-                                _ => return Err(QueueSubmitError::BufferStillMapped(id.0)),
+                    {
+                        let cmd_buf_data = cmdbuf.data.lock();
+                        let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers;
+
+                        // update submission IDs
+                        for buffer in cmd_buf_trackers.buffers.used_resources() {
+                            let id = buffer.info.id();
+                            let raw_buf = match buffer.raw {
+                                Some(ref raw) => raw,
+                                None => {
+                                    return Err(QueueSubmitError::DestroyedBuffer(id));
+                                }
+                            };
+                            buffer.info.use_at(submit_index);
+                            if buffer.is_unique() {
+                                if let BufferMapState::Active { .. } = *buffer.map_state.lock()
+                                {
+                                    log::warn!("Dropped buffer has a pending mapping.");
+                                    unsafe { device.raw().unmap_buffer(raw_buf) }
+                                        .map_err(DeviceError::from)?;
+                                }
+                                temp_suspected.as_mut().unwrap().insert(id, buffer.clone());
+                            } else {
+                                match *buffer.map_state.lock() {
+                                    BufferMapState::Idle => (),
+                                    _ => return Err(QueueSubmitError::BufferStillMapped(id)),
+                                }
                             }
                         }
-                    }
-                    for id in cmdbuf.trackers.textures.used() {
-                        let texture = match texture_guard.get_mut(id.0) {
-                            Ok(tex) => tex,
-                            Err(..) => {
-                                return Err(QueueSubmitError::DestroyedTexture(id.0));
-                            }
-                        };
-
-                        let should_extend = match texture.inner {
-                            TextureInner::Native { raw: None } => {
-                                unreachable!();
+                        for texture in cmd_buf_trackers.textures.used_resources() {
+                            let id = texture.info.id();
+                            let should_extend = match *texture.inner().as_ref().unwrap() {
+                                TextureInner::Native { raw: None } => {
+                                    return Err(QueueSubmitError::DestroyedTexture(id));
+                                }
+                                TextureInner::Native { raw: Some(_) } => false,
+                                TextureInner::Surface { ref has_work, .. } => {
+                                    has_work.store(true, Ordering::Relaxed);
+                                    true
+                                }
+                            };
+                            texture.info.use_at(submit_index);
+                            if texture.is_unique() {
+                                temp_suspected.as_mut().unwrap().insert(id, texture.clone());
                             }
-                            TextureInner::Native { raw: Some(_) } => false,
-                            TextureInner::Surface {
-                                ref mut has_work, ..
- } => { - *has_work = true; - true + if should_extend { + unsafe { + used_surface_textures + .merge_single(&texture, None, hal::TextureUses::PRESENT) + .unwrap(); + }; } - }; - if !texture.life_guard.use_at(submit_index) { - device.temp_suspected.textures.push(id); - } - if should_extend { - unsafe { - let ref_count = cmdbuf.trackers.textures.get_ref_count(id); - used_surface_textures - .merge_single( - &*texture_guard, - id, - None, - ref_count, - hal::TextureUses::PRESENT, - ) - .unwrap(); - }; - } - } - for id in cmdbuf.trackers.views.used() { - if !texture_view_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.texture_views.push(id); - } - } - for id in cmdbuf.trackers.bind_groups.used() { - let bg = &bind_group_guard[id]; - if !bg.life_guard.use_at(submit_index) { - device.temp_suspected.bind_groups.push(id); - } - // We need to update the submission indices for the contained - // state-less (!) resources as well, so that they don't get - // deleted too early if the parent bind group goes out of scope. - for sub_id in bg.used.views.used() { - texture_view_guard[sub_id].life_guard.use_at(submit_index); } - for sub_id in bg.used.samplers.used() { - sampler_guard[sub_id].life_guard.use_at(submit_index); - } - } - // assert!(cmdbuf.trackers.samplers.is_empty()); - for id in cmdbuf.trackers.compute_pipelines.used() { - if !compute_pipe_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.compute_pipelines.push(id); + for texture_view in cmd_buf_trackers.views.used_resources() { + texture_view.info.use_at(submit_index); + if texture_view.is_unique() { + temp_suspected + .as_mut() + .unwrap() + .insert(texture_view.as_info().id(), texture_view.clone()); + } } - } - for id in cmdbuf.trackers.render_pipelines.used() { - if !render_pipe_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.render_pipelines.push(id); + { + for bg in cmd_buf_trackers.bind_groups.used_resources() { + bg.info.use_at(submit_index); + // We need to update the submission indices for the contained + // state-less (!) resources as well, so that they don't get + // deleted too early if the parent bind group goes out of scope. + for view in bg.used.views.used_resources() { + view.info.use_at(submit_index); + } + for sampler in bg.used.samplers.used_resources() { + sampler.info.use_at(submit_index); + } + if bg.is_unique() { + temp_suspected + .as_mut() + .unwrap() + .insert(bg.as_info().id(), bg.clone()); + } + } } - } - for id in cmdbuf.trackers.query_sets.used() { - if !query_set_guard[id].life_guard.use_at(submit_index) { - device.temp_suspected.query_sets.push(id); + // assert!(cmd_buf_trackers.samplers.is_empty()); + for compute_pipeline in + cmd_buf_trackers.compute_pipelines.used_resources() + { + compute_pipeline.info.use_at(submit_index); + if compute_pipeline.is_unique() { + temp_suspected.as_mut().unwrap().insert( + compute_pipeline.as_info().id(), + compute_pipeline.clone(), + ); + } } - } - for id in cmdbuf.trackers.bundles.used() { - let bundle = &render_bundle_guard[id]; - if !bundle.life_guard.use_at(submit_index) { - device.temp_suspected.render_bundles.push(id); + for render_pipeline in + cmd_buf_trackers.render_pipelines.used_resources() + { + render_pipeline.info.use_at(submit_index); + if render_pipeline.is_unique() { + temp_suspected.as_mut().unwrap().insert( + render_pipeline.as_info().id(), + render_pipeline.clone(), + ); + } } - // We need to update the submission indices for the contained - // state-less (!) 
resources as well, excluding the bind groups. - // They don't get deleted too early if the bundle goes out of scope. - for sub_id in bundle.used.render_pipelines.used() { - render_pipe_guard[sub_id].life_guard.use_at(submit_index); + for query_set in cmd_buf_trackers.query_sets.used_resources() { + query_set.info.use_at(submit_index); + if query_set.is_unique() { + temp_suspected + .as_mut() + .unwrap() + .insert(query_set.as_info().id(), query_set.clone()); + } } - for sub_id in bundle.used.query_sets.used() { - query_set_guard[sub_id].life_guard.use_at(submit_index); + for bundle in cmd_buf_trackers.bundles.used_resources() { + bundle.info.use_at(submit_index); + // We need to update the submission indices for the contained + // state-less (!) resources as well, excluding the bind groups. + // They don't get deleted too early if the bundle goes out of scope. + for render_pipeline in + bundle.used.render_pipelines.read().used_resources() + { + render_pipeline.info.use_at(submit_index); + } + for query_set in bundle.used.query_sets.read().used_resources() { + query_set.info.use_at(submit_index); + } + if bundle.is_unique() { + temp_suspected + .as_mut() + .unwrap() + .insert(bundle.as_info().id(), bundle.clone()); + } } } - - let mut baked = cmdbuf.into_baked(); + let mut baked = cmdbuf.from_arc_into_baked(); // execute resource transitions unsafe { baked @@ -1265,11 +1344,14 @@ impl Global { .map_err(DeviceError::from)? }; log::trace!("Stitching command buffer {:?} before submission", cmb_id); + + //Note: locking the trackers has to be done after the storages + let mut trackers = device.trackers.lock(); baked - .initialize_buffer_memory(&mut *trackers, &mut *buffer_guard) + .initialize_buffer_memory(&mut *trackers) .map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?; baked - .initialize_texture_memory(&mut *trackers, &mut *texture_guard, device) + .initialize_texture_memory(&mut *trackers, device) .map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?; //Note: stateless trackers are not merged: // device already knows these resources exist. @@ -1277,8 +1359,6 @@ impl Global { &mut baked.encoder, &mut *trackers, &baked.trackers, - &*buffer_guard, - &*texture_guard, ); let transit = unsafe { baked.encoder.end_encoding().unwrap() }; @@ -1299,11 +1379,12 @@ impl Global { }; trackers .textures - .set_from_usage_scope(&*texture_guard, &used_surface_textures); - let texture_barriers = trackers.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + .set_from_usage_scope(&used_surface_textures); + let (transitions, textures) = trackers.textures.drain_transitions(); + let texture_barriers = transitions + .into_iter() + .enumerate() + .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); let present = unsafe { baked.encoder.transition_textures(texture_barriers); baked.encoder.end_encoding().unwrap() @@ -1321,102 +1402,81 @@ impl Global { log::trace!("Device after submission {}", submit_index); } + } - let super::Device { - ref mut pending_writes, - ref mut queue, - ref mut fence, - .. - } = *device; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); - { - // TODO: These blocks have a few organizational issues, and - // should be refactored. - // - // 1) It's similar to the code we have per-command-buffer - // (at the begin and end) Maybe we can merge some? - // - // 2) It's doing the extra locking unconditionally. 
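Bundles and bind groups push the submission index down into their stateless sub-resources so those outlive the parent for the whole submission. A simplified model of that keep-alive rule, with illustrative types:

    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

    struct Info {
        last_submission_index: AtomicU64,
    }

    struct Pipeline {
        info: Info,
    }

    struct Bundle {
        info: Info,
        render_pipelines: Vec<Arc<Pipeline>>,
    }

    // The bundle and everything it references get the same submission index,
    // so no sub-resource can be reclaimed before the bundle's work completes.
    fn use_bundle_at(bundle: &Bundle, index: u64) {
        bundle.info.last_submission_index.store(index, Ordering::Release);
        for pipeline in &bundle.render_pipelines {
            pipeline.info.last_submission_index.store(index, Ordering::Release);
        }
    }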
Maybe we - // can only do so if any surfaces are being written to? - let (_, mut token) = hub.buffers.read(&mut token); // skip token - let (mut texture_guard, _) = hub.textures.write(&mut token); - - used_surface_textures.set_size(texture_guard.len()); - - for &id in pending_writes.dst_textures.iter() { - let texture = texture_guard.get_mut(id).unwrap(); - match texture.inner { - TextureInner::Native { raw: None } => { - return Err(QueueSubmitError::DestroyedTexture(id)); - } - TextureInner::Native { raw: Some(_) } => {} - TextureInner::Surface { - ref mut has_work, .. - } => { - *has_work = true; - let ref_count = texture.life_guard.add_ref(); - unsafe { - used_surface_textures - .merge_single( - &*texture_guard, - id::Valid(id), - None, - &ref_count, - hal::TextureUses::PRESENT, - ) - .unwrap() - }; - } + { + let texture_guard = hub.textures.read(); + + used_surface_textures.set_size(texture_guard.len()); + for (&id, texture) in pending_writes.dst_textures.iter() { + match *texture.inner().as_ref().unwrap() { + TextureInner::Native { raw: None } => { + return Err(QueueSubmitError::DestroyedTexture(id)); + } + TextureInner::Native { raw: Some(_) } => {} + TextureInner::Surface { ref has_work, .. } => { + has_work.store(true, Ordering::Relaxed); + unsafe { + used_surface_textures + .merge_single(texture, None, hal::TextureUses::PRESENT) + .unwrap() + }; } } + } - if !used_surface_textures.is_empty() { - let mut trackers = device.trackers.lock(); - - trackers - .textures - .set_from_usage_scope(&*texture_guard, &used_surface_textures); - let texture_barriers = trackers.textures.drain().map(|pending| { - let tex = unsafe { texture_guard.get_unchecked(pending.id) }; - pending.into_hal(tex) - }); + if !used_surface_textures.is_empty() { + let mut trackers = device.trackers.lock(); - unsafe { - pending_writes - .command_encoder - .transition_textures(texture_barriers); - }; - } + trackers + .textures + .set_from_usage_scope(&used_surface_textures); + let (transitions, textures) = trackers.textures.drain_transitions(); + let texture_barriers = transitions + .into_iter() + .enumerate() + .map(|(i, p)| p.into_hal(textures[i].as_ref().unwrap())); + unsafe { + pending_writes + .command_encoder + .transition_textures(texture_barriers); + }; } + } - let refs = pending_writes - .pre_submit() - .into_iter() - .chain( - active_executions - .iter() - .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), - ) - .collect::>(); - unsafe { - queue - .submit(&refs, Some((fence, submit_index))) - .map_err(DeviceError::from)?; - } + let refs = pending_writes + .pre_submit() + .into_iter() + .chain( + active_executions + .iter() + .flat_map(|pool_execution| pool_execution.cmd_buffers.iter()), + ) + .collect::>(); + unsafe { + queue + .raw + .as_ref() + .unwrap() + .submit(&refs, Some((fence, submit_index))) + .map_err(DeviceError::from)?; } profiling::scope!("cleanup"); - if let Some(pending_execution) = device.pending_writes.post_submit( - &device.command_allocator, - &device.raw, - &device.queue, + if let Some(pending_execution) = pending_writes.post_submit( + device.command_allocator.lock().as_mut().unwrap(), + device.raw(), + queue.raw.as_ref().unwrap(), ) { active_executions.push(pending_execution); } // this will register the new submission to the life time tracker - let mut pending_write_resources = mem::take(&mut device.pending_writes.temp_resources); - device.lock_life(&mut token).track_submission( + let mut pending_write_resources = mem::take(&mut pending_writes.temp_resources); + 
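The `mem::take` above empties `temp_resources` while keeping ownership of the vector on hand, so its heap allocation can be handed back after the drain and reused on the next submission. A sketch of the idiom:

    use std::mem;

    struct PendingWrites {
        temp_resources: Vec<u32>, // stand-in element type
    }

    fn drain_and_keep_allocation(pending: &mut PendingWrites) {
        let mut temp = mem::take(&mut pending.temp_resources);
        for _resource in temp.drain(..) {
            // ... hand each resource to the lifetime tracker ...
        }
        // `temp` is now empty but still owns its allocation; give it back.
        pending.temp_resources = temp;
    }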
device.lock_life().track_submission( submit_index, pending_write_resources.drain(..), active_executions, @@ -1424,7 +1484,7 @@ impl Global { // This will schedule destruction of all resources that are no longer needed // by the user but used in the command stream, among other things. - let (closures, _) = match device.maintain(hub, wgt::Maintain::Poll, &mut token) { + let (closures, _) = match device.maintain(hub, fence, wgt::Maintain::Poll) { Ok(closures) => closures, Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)), Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu), @@ -1433,9 +1493,8 @@ impl Global { // pending_write_resources has been drained, so it's empty, but we // want to retain its heap allocation. - device.pending_writes.temp_resources = pending_write_resources; - device.temp_suspected.clear(); - device.lock_life(&mut token).post_submit(); + pending_writes.temp_resources = pending_write_resources; + device.lock_life().post_submit(); (submit_index, closures) }; @@ -1451,30 +1510,31 @@ impl Global { pub fn queue_get_timestamp_period( &self, - queue_id: id::QueueId, + queue_id: QueueId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, _) = hub.devices.read(&mut token); - match device_guard.get(queue_id) { - Ok(device) => Ok(unsafe { device.queue.get_timestamp_period() }), + match hub.queues.get(queue_id) { + Ok(queue) => Ok(unsafe { queue.raw.as_ref().unwrap().get_timestamp_period() }), Err(_) => Err(InvalidQueue), } } pub fn queue_on_submitted_work_done( &self, - queue_id: id::QueueId, + queue_id: QueueId, closure: SubmittedWorkDoneClosure, ) -> Result<(), InvalidQueue> { log::trace!("Queue::on_submitted_work_done {queue_id:?}"); //TODO: flush pending writes let hub = A::hub(self); - let mut token = Token::root(); - let (device_guard, mut token) = hub.devices.read(&mut token); - match device_guard.get(queue_id) { - Ok(device) => device.lock_life(&mut token).add_work_done_closure(closure), + match hub.queues.get(queue_id) { + Ok(queue) => queue + .device + .as_ref() + .unwrap() + .lock_life() + .add_work_done_closure(closure), Err(_) => return Err(InvalidQueue), } Ok(()) diff --git a/third_party/rust/wgpu-core/src/device/resource.rs b/third_party/rust/wgpu-core/src/device/resource.rs index de84baacf391c..035b2ea55416b 100644 --- a/third_party/rust/wgpu-core/src/device/resource.rs +++ b/third_party/rust/wgpu-core/src/device/resource.rs @@ -1,45 +1,58 @@ #[cfg(feature = "trace")] use crate::device::trace; use crate::{ - binding_model::{ - self, get_bind_group_layout, try_get_bind_group_layout, BglOrDuplicate, - BindGroupLayoutInner, - }, + binding_model::{self, BindGroupLayout, BindGroupLayoutEntryError}, command, conv, - device::life::WaitIdleError, + device::life::{LifetimeTracker, WaitIdleError}, + device::queue::PendingWrites, device::{ AttachmentData, CommandAllocator, DeviceLostInvocation, MissingDownlevelFlags, MissingFeatures, RenderPassContext, CLEANUP_WAIT_MS, }, hal_api::HalApi, hal_label, - hub::{Hub, Token}, - id, - identity::GlobalIdentityHandlerFactory, + hub::Hub, + id::{self, DeviceId, QueueId}, init_tracker::{ BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange, TextureInitTracker, TextureInitTrackerAction, }, instance::Adapter, pipeline, - resource::{self, Buffer, TextureViewNotRenderableReason}, + registry::Registry, + resource::ResourceInfo, + resource::{ + self, Buffer, QuerySet, Resource, ResourceType, Sampler, Texture, TextureView, + 
TextureViewNotRenderableReason,
+    },
     storage::Storage,
     track::{BindGroupStates, TextureSelector, Tracker},
     validation::{self, check_buffer_usage, check_texture_usage},
-    FastHashMap, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored, SubmissionIndex,
+    FastHashMap, LabelHelpers as _, SubmissionIndex,
 };
 
 use arrayvec::ArrayVec;
 use hal::{CommandEncoder as _, Device as _};
-use parking_lot::{Mutex, MutexGuard};
+use parking_lot::{Mutex, MutexGuard, RwLock};
+
 use smallvec::SmallVec;
 use thiserror::Error;
 use wgt::{DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension};
 
-use std::{borrow::Cow, iter, num::NonZeroU32};
+use std::{
+    borrow::Cow,
+    iter,
+    num::NonZeroU32,
+    sync::{
+        atomic::{AtomicBool, AtomicU64, Ordering},
+        Arc,
+    },
+};
 
 use super::{
-    life, queue, DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE,
+    life::{self, ResourceMaps},
+    queue::{self},
+    DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, EP_FAILURE,
     IMPLICIT_FAILURE, ZERO_BUFFER_SIZE,
 };
 
@@ -47,34 +60,39 @@ use super::{
 /// stored behind mutexes.
 ///
 /// TODO: establish clear order of locking for these:
-/// `mem_allocator`, `desc_allocator`, `life_tracker`, `trackers`,
-/// `render_passes`, `pending_writes`, `trace`.
+/// `life_tracker`, `trackers`, `render_passes`, `pending_writes`, `trace`.
 ///
 /// Currently, the rules are:
 /// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
 /// 1. `self.trackers` is locked last (unenforced)
 /// 1. `self.trace` is locked last (unenforced)
+///
+/// Right now, avoid locking the same resource or registry twice within one call,
+/// and keep each lock to the smallest scope possible.
+/// Unless otherwise specified, no lock may be acquired while holding another lock.
+/// This means that you must inspect function calls made while a lock is held
+/// to see what locks the callee may try to acquire.
+///
+/// As an illustration of this point:
+/// device_maintain_ids locks Device::lifetime_tracker, and calls...
+/// triage_suspected locks Device::trackers, and calls...
+/// Registry::unregister locks Registry::storage
+///
+/// Important:
+/// When locking pending_writes, check that trackers is not locked;
+/// trackers should be locked only when needed, for the shortest time possible.
 pub struct Device<A: HalApi> {
-    pub(crate) raw: A::Device,
-    pub(crate) adapter_id: Stored<id::AdapterId>,
-    pub(crate) queue: A::Queue,
-    pub(crate) zero_buffer: A::Buffer,
-    //pub(crate) cmd_allocator: command::CommandAllocator<A>,
-    //mem_allocator: Mutex<alloc::MemoryAllocator<A>>,
-    //desc_allocator: Mutex<descriptor::DescriptorAllocator<A>>,
+    raw: Option<A::Device>,
+    pub(crate) adapter: Arc<Adapter<A>>,
+    pub(crate) queue_id: RwLock<Option<QueueId>>,
+    queue_to_drop: RwLock<Option<A::Queue>>,
+    pub(crate) zero_buffer: Option<A::Buffer>,
+    pub(crate) info: ResourceInfo<DeviceId>,
+
+    pub(crate) command_allocator: Mutex<Option<CommandAllocator<A>>>,
     //Note: The submission index here corresponds to the last submission that is done.
-    pub(crate) life_guard: LifeGuard,
-
-    /// A clone of `life_guard.ref_count`.
-    ///
-    /// Holding a separate clone of the `RefCount` here lets us tell whether the
-    /// device is referenced by other resources, even if `life_guard.ref_count`
-    /// was set to `None` by a call to `device_drop`.
-    pub(super) ref_count: RefCount,
-
-    pub(super) command_allocator: Mutex<CommandAllocator<A>>,
-    pub(crate) active_submission_index: SubmissionIndex,
-    pub(super) fence: A::Fence,
+    pub(crate) active_submission_index: AtomicU64, //SubmissionIndex,
+    pub(crate) fence: RwLock<Option<A::Fence>>,
 
     /// Is this device valid?
Valid is closely associated with "lose the device", /// which can be triggered by various methods, including at the end of device @@ -87,40 +105,67 @@ pub struct Device { /// Error enums, we wouldn't need this. For now, we need it. All the call /// sites where we check it are areas that should be revisited if we start /// using ref-counted references for internal access. - pub(crate) valid: bool, + pub(crate) valid: AtomicBool, /// All live resources allocated with this [`Device`]. /// /// Has to be locked temporarily only (locked last) + /// and never before pending_writes pub(crate) trackers: Mutex>, // Life tracker should be locked right after the device and before anything else. - life_tracker: Mutex>, + life_tracker: Mutex>, /// Temporary storage for resource management functions. Cleared at the end /// of every call (unless an error occurs). - pub(super) temp_suspected: life::SuspectedResources, + pub(crate) temp_suspected: Mutex>, pub(crate) alignments: hal::Alignments, pub(crate) limits: wgt::Limits, pub(crate) features: wgt::Features, pub(crate) downlevel: wgt::DownlevelCapabilities, pub(crate) instance_flags: wgt::InstanceFlags, - // TODO: move this behind another mutex. This would allow several methods to - // switch to borrow Device immutably, such as `write_buffer`, `write_texture`, - // and `buffer_unmap`. - pub(super) pending_writes: queue::PendingWrites, + pub(crate) pending_writes: Mutex>>, #[cfg(feature = "trace")] - pub(crate) trace: Option>, + pub(crate) trace: Mutex>, +} + +impl std::fmt::Debug for Device { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Device") + .field("adapter", &self.adapter.info.label()) + .field("limits", &self.limits) + .field("features", &self.features) + .field("downlevel", &self.downlevel) + .finish() + } +} + +impl Drop for Device { + fn drop(&mut self) { + log::info!("Destroying Device {:?}", self.info.label()); + let raw = self.raw.take().unwrap(); + let pending_writes = self.pending_writes.lock().take().unwrap(); + pending_writes.dispose(&raw); + self.command_allocator.lock().take().unwrap().dispose(&raw); + unsafe { + raw.destroy_buffer(self.zero_buffer.take().unwrap()); + raw.destroy_fence(self.fence.write().take().unwrap()); + let queue = self.queue_to_drop.write().take().unwrap(); + raw.exit(queue); + } + } } #[derive(Clone, Debug, Error)] -#[non_exhaustive] pub enum CreateDeviceError { - #[error("Not enough memory left")] + #[error("Not enough memory left to create device")] OutOfMemory, #[error("Failed to create internal buffer for initializing textures")] FailedToCreateZeroBuffer(#[from] DeviceError), } impl Device { + pub(crate) fn raw(&self) -> &A::Device { + self.raw.as_ref().unwrap() + } pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> { if self.features.contains(feature) { Ok(()) @@ -143,8 +188,9 @@ impl Device { impl Device { pub(crate) fn new( - open: hal::OpenDevice, - adapter_id: Stored, + raw_device: A::Device, + raw_queue: &A::Queue, + adapter: &Arc>, alignments: hal::Alignments, downlevel: wgt::DownlevelCapabilities, desc: &DeviceDescriptor, @@ -156,19 +202,19 @@ impl Device { log::error!("Feature 'trace' is not enabled"); } let fence = - unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; + unsafe { raw_device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; let mut com_alloc = CommandAllocator { free_encoders: Vec::new(), }; let pending_encoder = com_alloc - .acquire_encoder(&open.device, 
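Raw backend objects are now stored as `Option`s so `Drop` can move them out and destroy them in a controlled order, as the `Device` drop above does. A minimal sketch of the take-on-drop idiom, with a stand-in type for the HAL object:

    struct RawBuffer; // stand-in for a wgpu-hal object

    struct Owner {
        raw: Option<RawBuffer>,
    }

    impl Drop for Owner {
        fn drop(&mut self) {
            // Move the backend object out of the Option; the real code hands
            // it to an explicit destroy_*/exit call on the HAL device.
            if let Some(raw) = self.raw.take() {
                drop(raw);
            }
        }
    }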
&open.queue) + .acquire_encoder(&raw_device, raw_queue) .map_err(|_| CreateDeviceError::OutOfMemory)?; let mut pending_writes = queue::PendingWrites::::new(pending_encoder); // Create zeroed buffer used for texture clears. let zero_buffer = unsafe { - open.device + raw_device .create_buffer(&hal::BufferDescriptor { label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags), size: ZERO_BUFFER_SIZE, @@ -196,54 +242,52 @@ impl Device { })); } - let life_guard = LifeGuard::new(""); - let ref_count = life_guard.add_ref(); Ok(Self { - raw: open.device, - adapter_id, - queue: open.queue, - zero_buffer, - life_guard, - ref_count, - command_allocator: Mutex::new(com_alloc), - active_submission_index: 0, - fence, - valid: true, + raw: Some(raw_device), + adapter: adapter.clone(), + queue_id: RwLock::new(None), + queue_to_drop: RwLock::new(None), + zero_buffer: Some(zero_buffer), + info: ResourceInfo::new(""), + command_allocator: Mutex::new(Some(com_alloc)), + active_submission_index: AtomicU64::new(0), + fence: RwLock::new(Some(fence)), + valid: AtomicBool::new(true), trackers: Mutex::new(Tracker::new()), life_tracker: Mutex::new(life::LifetimeTracker::new()), - temp_suspected: life::SuspectedResources::default(), + temp_suspected: Mutex::new(Some(life::ResourceMaps::new::())), #[cfg(feature = "trace")] - trace: trace_path.and_then(|path| match trace::Trace::new(path) { + trace: Mutex::new(trace_path.and_then(|path| match trace::Trace::new(path) { Ok(mut trace) => { trace.add(trace::Action::Init { desc: desc.clone(), backend: A::VARIANT, }); - Some(Mutex::new(trace)) + Some(trace) } Err(e) => { log::error!("Unable to start a trace in '{:?}': {:?}", path, e); None } - }), + })), alignments, limits: desc.limits.clone(), features: desc.features, downlevel, instance_flags, - pending_writes, + pending_writes: Mutex::new(Some(pending_writes)), }) } pub fn is_valid(&self) -> bool { - self.valid + self.valid.load(Ordering::Acquire) } - pub(super) fn lock_life<'this, 'token: 'this>( - &'this self, - //TODO: fix this - the token has to be borrowed for the lock - _token: &mut Token<'token, Self>, - ) -> MutexGuard<'this, life::LifetimeTracker> { + pub(crate) fn release_queue(&self, queue: A::Queue) { + self.queue_to_drop.write().replace(queue); + } + + pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> { self.life_tracker.lock() } @@ -260,32 +304,36 @@ impl Device { /// submissions still in flight. (We have to take the locks needed to /// produce this information for other reasons, so we might as well just /// return it to our callers.) - pub(super) fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>( + pub(crate) fn maintain<'this>( &'this self, - hub: &Hub, + hub: &Hub, + fence: &A::Fence, maintain: wgt::Maintain, - token: &mut Token<'token, Self>, ) -> Result<(UserClosures, bool), WaitIdleError> { profiling::scope!("Device::maintain"); - let mut life_tracker = self.lock_life(token); - - // Normally, `temp_suspected` exists only to save heap - // allocations: it's cleared at the start of the function - // call, and cleared by the end. But `Global::queue_submit` is - // fallible; if it exits early, it may leave some resources in - // `temp_suspected`. 
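`temp_suspected` lives in a `Mutex<Option<_>>` and is swapped out wholesale, so the lock is held only for the exchange rather than for the whole triage. The same move with std types (the patch itself uses parking_lot and `ResourceMaps`):

    use std::{collections::HashMap, sync::Mutex};

    struct Device {
        temp_suspected: Mutex<Option<HashMap<u64, String>>>,
    }

    fn take_suspected(device: &Device) -> HashMap<u64, String> {
        // Replace the map with a fresh one and take the old contents out,
        // releasing the lock immediately afterwards.
        device
            .temp_suspected
            .lock()
            .unwrap()
            .replace(HashMap::new())
            .unwrap()
    }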
-        life_tracker
-            .suspected_resources
-            .extend(&self.temp_suspected);
-
-        life_tracker.triage_suspected(
-            hub,
-            &self.trackers,
-            #[cfg(feature = "trace")]
-            self.trace.as_ref(),
-            token,
-        );
-        life_tracker.triage_mapped(hub, token);
+        {
+            // Normally, `temp_suspected` exists only to save heap
+            // allocations: it's cleared at the start of the function
+            // call, and cleared by the end. But `Global::queue_submit` is
+            // fallible; if it exits early, it may leave some resources in
+            // `temp_suspected`.
+            let temp_suspected = self
+                .temp_suspected
+                .lock()
+                .replace(ResourceMaps::new::<A>())
+                .unwrap();
+
+            let mut life_tracker = self.lock_life();
+            life_tracker.suspected_resources.extend(temp_suspected);
+
+            life_tracker.triage_suspected(
+                hub,
+                &self.trackers,
+                #[cfg(feature = "trace")]
+                self.trace.lock().as_mut(),
+            );
+            life_tracker.triage_mapped();
+        }
 
         let last_done_index = if maintain.is_wait() {
             let index_to_wait_for = match maintain {
@@ -294,26 +342,35 @@ impl<A: HalApi> Device<A> {
                 // as we already checked this from inside the poll call.
                 submission_index.index
             }
-            _ => self.active_submission_index,
+            _ => self.active_submission_index.load(Ordering::Relaxed),
         };
         unsafe {
             self.raw
-                .wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS)
+                .as_ref()
+                .unwrap()
+                .wait(fence, index_to_wait_for, CLEANUP_WAIT_MS)
                 .map_err(DeviceError::from)?
         };
         index_to_wait_for
     } else {
         unsafe {
             self.raw
-                    .get_fence_value(&self.fence)
+                    .as_ref()
+                    .unwrap()
+                    .get_fence_value(fence)
                     .map_err(DeviceError::from)?
         }
     };
 
-        let submission_closures =
-            life_tracker.triage_submissions(last_done_index, &self.command_allocator);
-        let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
-        life_tracker.cleanup(&self.raw);
+        let mut life_tracker = self.lock_life();
+        let submission_closures = life_tracker.triage_submissions(
+            last_done_index,
+            self.command_allocator.lock().as_mut().unwrap(),
+        );
+        let mapping_closures = life_tracker.handle_mapping(hub, self.raw(), &self.trackers);
+
+        // Clean up resources and release all unused suspected ones.
+        life_tracker.cleanup();
 
         // Detect if we have been destroyed and now need to lose the device.
         // If we are invalid (set at start of destroy) and our queue is empty,
@@ -321,7 +378,10 @@
         // our caller. This will complete the steps for both destroy and for
         // "lose the device".
         let mut device_lost_invocations = SmallVec::new();
-        if !self.valid && life_tracker.queue_empty() && life_tracker.device_lost_closure.is_some() {
+        if !self.is_valid()
+            && life_tracker.queue_empty()
+            && life_tracker.device_lost_closure.is_some()
+        {
             device_lost_invocations.push(DeviceLostInvocation {
                 closure: life_tracker.device_lost_closure.take().unwrap(),
                 reason: DeviceLostReason::Destroyed,
@@ -337,129 +397,66 @@ impl<A: HalApi> Device<A> {
         Ok((closures, life_tracker.queue_empty()))
     }
 
-    pub(super) fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
-        &'this mut self,
-        hub: &Hub<A, G>,
-        trackers: &Tracker<A>,
-        token: &mut Token<'token, Self>,
-    ) {
-        self.temp_suspected.clear();
+    pub(crate) fn untrack(&self, trackers: &Tracker<A>) {
+        let mut temp_suspected = self
+            .temp_suspected
+            .lock()
+            .replace(ResourceMaps::new::<A>())
+            .unwrap();
+        temp_suspected.clear();
         // As the tracker is cleared/dropped, we need to consider all the resources
         // that it references for destruction in the next GC pass.
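`maintain` above compares the fence's completed value against `active_submission_index`, now an `AtomicU64`, to decide how much work can be reclaimed. A toy model of that bookkeeping, with stand-in fields instead of a real HAL fence:

    use std::sync::atomic::{AtomicU64, Ordering};

    struct QueueState {
        active_submission_index: AtomicU64, // last index handed to the GPU
        fence_completed: AtomicU64,         // stand-in for the fence's value
    }

    // Everything with a submission index at or below the returned value is
    // safe to destroy; a "wait" maintain raises the target to the last
    // active index before asking.
    fn last_done_index(state: &QueueState, wait_for_all: bool) -> u64 {
        if wait_for_all {
            state.active_submission_index.load(Ordering::Relaxed)
        } else {
            state.fence_completed.load(Ordering::Acquire)
        }
    }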
{ - let (bind_group_guard, mut token) = hub.bind_groups.read(token); - let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); - let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); - let (query_set_guard, mut token) = hub.query_sets.read(&mut token); - let (buffer_guard, mut token) = hub.buffers.read(&mut token); - let (texture_guard, mut token) = hub.textures.read(&mut token); - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, _) = hub.samplers.read(&mut token); - - for id in trackers.buffers.used() { - if buffer_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.buffers.push(id); + for resource in trackers.buffers.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.textures.used() { - if texture_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.textures.push(id); + for resource in trackers.textures.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.views.used() { - if texture_view_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.texture_views.push(id); + for resource in trackers.views.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.bind_groups.used() { - if bind_group_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.bind_groups.push(id); + for resource in trackers.bind_groups.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.samplers.used() { - if sampler_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.samplers.push(id); + for resource in trackers.samplers.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.compute_pipelines.used() { - if compute_pipe_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.compute_pipelines.push(id); + for resource in trackers.compute_pipelines.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.render_pipelines.used() { - if render_pipe_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.render_pipelines.push(id); + for resource in trackers.render_pipelines.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } - for id in trackers.query_sets.used() { - if query_set_guard - .get_occupied_or_destroyed(id.0) - .unwrap() - .life_guard - .ref_count - .is_none() - { - self.temp_suspected.query_sets.push(id); + for resource in trackers.query_sets.used_resources() { + if resource.is_unique() { + temp_suspected.insert(resource.as_info().id(), resource.clone()); } } } - - self.lock_life(token) - .suspected_resources - .extend(&self.temp_suspected); - - self.temp_suspected.clear(); + 
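The eight near-identical loops above all apply one rule: anything whose final strong reference is held by the tracker goes into `temp_suspected`. Expressed once, generically (the real code keeps separate per-type maps):

    use std::sync::Arc;

    fn suspect_unique<T>(
        used: impl Iterator<Item = Arc<T>>,
        suspected: &mut Vec<Arc<T>>,
    ) {
        for resource in used {
            if Arc::strong_count(&resource) == 1 {
                suspected.push(resource);
            }
        }
    }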
self.lock_life().suspected_resources.extend(temp_suspected); } - pub(super) fn create_buffer( - &self, - self_id: id::DeviceId, + pub(crate) fn create_buffer( + self: &Arc, desc: &resource::BufferDescriptor, transient: bool, ) -> Result, resource::CreateBufferError> { - debug_assert_eq!(self_id.backend(), A::VARIANT); + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); if desc.size > self.limits.max_buffer_size { return Err(resource::CreateBufferError::MaxBufferSize { @@ -538,87 +535,75 @@ impl Device { usage, memory_flags, }; - let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?; + let buffer = unsafe { self.raw().create_buffer(&hal_desc) }.map_err(DeviceError::from)?; Ok(Buffer { raw: Some(buffer), - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + device: self.clone(), usage: desc.usage, size: desc.size, - initialization_status: BufferInitTracker::new(desc.size), - sync_mapped_writes: None, - map_state: resource::BufferMapState::Idle, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + initialization_status: RwLock::new(BufferInitTracker::new(desc.size)), + sync_mapped_writes: Mutex::new(None), + map_state: Mutex::new(resource::BufferMapState::Idle), + info: ResourceInfo::new(desc.label.borrow_or_default()), }) } - pub(super) fn create_texture_from_hal( - &self, + pub(crate) fn create_texture_from_hal( + self: &Arc, hal_texture: A::Texture, hal_usage: hal::TextureUses, - self_id: id::DeviceId, desc: &resource::TextureDescriptor, format_features: wgt::TextureFormatFeatures, clear_mode: resource::TextureClearMode, - ) -> resource::Texture { - debug_assert_eq!(self_id.backend(), A::VARIANT); + ) -> Texture { + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); - resource::Texture { - inner: resource::TextureInner::Native { + Texture { + inner: RwLock::new(Some(resource::TextureInner::Native { raw: Some(hal_texture), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + })), + device: self.clone(), desc: desc.map_label(|_| ()), hal_usage, format_features, - initialization_status: TextureInitTracker::new( + initialization_status: RwLock::new(TextureInitTracker::new( desc.mip_level_count, desc.array_layer_count(), - ), + )), full_range: TextureSelector { mips: 0..desc.mip_level_count, layers: 0..desc.array_layer_count(), }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - clear_mode, + info: ResourceInfo::new(desc.label.borrow_or_default()), + clear_mode: RwLock::new(clear_mode), } } pub fn create_buffer_from_hal( - &self, + self: &Arc, hal_buffer: A::Buffer, - self_id: id::DeviceId, desc: &resource::BufferDescriptor, ) -> Buffer { - debug_assert_eq!(self_id.backend(), A::VARIANT); + debug_assert_eq!(self.as_info().id().backend(), A::VARIANT); Buffer { raw: Some(hal_buffer), - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + device: self.clone(), usage: desc.usage, size: desc.size, - initialization_status: BufferInitTracker::new(0), - sync_mapped_writes: None, - map_state: resource::BufferMapState::Idle, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + initialization_status: RwLock::new(BufferInitTracker::new(0)), + sync_mapped_writes: Mutex::new(None), + map_state: Mutex::new(resource::BufferMapState::Idle), + info: ResourceInfo::new(desc.label.borrow_or_default()), } } - pub(super) fn create_texture( - &self, - self_id: id::DeviceId, + pub(crate) fn 
create_texture( + self: &Arc, adapter: &Adapter, desc: &resource::TextureDescriptor, - ) -> Result, resource::CreateTextureError> { + ) -> Result, resource::CreateTextureError> { use resource::{CreateTextureError, TextureDimensionError}; if desc.usage.is_empty() || desc.usage.contains_invalid_bits() { @@ -775,26 +760,7 @@ impl Device { self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?; } - // Enforce having COPY_DST/DEPTH_STENCIL_WRITE/COLOR_TARGET otherwise we - // wouldn't be able to initialize the texture. - let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into()) - | if desc.format.is_depth_stencil_format() { - hal::TextureUses::DEPTH_STENCIL_WRITE - } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) { - hal::TextureUses::COPY_DST // (set already) - } else { - // Use COPY_DST only if we can't use COLOR_TARGET - if format_features - .allowed_usages - .contains(wgt::TextureUsages::RENDER_ATTACHMENT) - && desc.dimension == wgt::TextureDimension::D2 - // Render targets dimension must be 2d - { - hal::TextureUses::COLOR_TARGET - } else { - hal::TextureUses::COPY_DST - } - }; + let hal_usage = conv::map_texture_usage_for_texture(desc, &format_features); let hal_desc = hal::TextureDescriptor { label: desc.label.to_hal(self.instance_flags), @@ -810,6 +776,8 @@ impl Device { let raw_texture = unsafe { self.raw + .as_ref() + .unwrap() .create_texture(&hal_desc) .map_err(DeviceError::from)? }; @@ -849,10 +817,10 @@ impl Device { array_layer_count: Some(1), }, }; - clear_views.push( - unsafe { self.raw.create_texture_view(&raw_texture, &desc) } + clear_views.push(Some( + unsafe { self.raw().create_texture_view(&raw_texture, &desc) } .map_err(DeviceError::from)?, - ); + )); } } resource::TextureClearMode::RenderPass { @@ -863,29 +831,23 @@ impl Device { resource::TextureClearMode::BufferCopy }; - let mut texture = self.create_texture_from_hal( - raw_texture, - hal_usage, - self_id, - desc, - format_features, - clear_mode, - ); + let mut texture = + self.create_texture_from_hal(raw_texture, hal_usage, desc, format_features, clear_mode); texture.hal_usage = hal_usage; Ok(texture) } - pub(super) fn create_texture_view( - &self, - texture: &resource::Texture, - texture_id: id::TextureId, + pub(crate) fn create_texture_view( + self: &Arc, + texture: &Arc>, desc: &resource::TextureViewDescriptor, - ) -> Result, resource::CreateTextureViewError> { - let texture_raw = texture - .inner + ) -> Result, resource::CreateTextureViewError> { + let inner = texture.inner(); + let texture_raw = inner + .as_ref() + .unwrap() .as_raw() .ok_or(resource::CreateTextureViewError::InvalidTexture)?; - // resolve TextureViewDescriptor defaults // https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults @@ -1119,7 +1081,7 @@ impl Device { log::debug!( "Create view for texture {:?} filters usages to {:?}", - texture_id, + texture.as_info().id(), usage ); @@ -1148,6 +1110,8 @@ impl Device { let raw = unsafe { self.raw + .as_ref() + .unwrap() .create_texture_view(texture_raw, &hal_desc) .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? 
}; @@ -1157,13 +1121,10 @@ impl Device { layers: desc.range.base_array_layer..array_layer_end, }; - Ok(resource::TextureView { - raw, - parent_id: Stored { - value: id::Valid(texture_id), - ref_count: texture.life_guard.add_ref(), - }, - device_id: texture.device_id.clone(), + Ok(TextureView { + raw: Some(raw), + parent: RwLock::new(Some(texture.clone())), + device: self.clone(), desc: resource::HalTextureViewDescriptor { format: resolved_format, dimension: resolved_dimension, @@ -1173,15 +1134,14 @@ impl Device { render_extent, samples: texture.desc.sample_count, selector, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + info: ResourceInfo::new(desc.label.borrow_or_default()), }) } - pub(super) fn create_sampler( - &self, - self_id: id::DeviceId, + pub(crate) fn create_sampler( + self: &Arc, desc: &resource::SamplerDescriptor, - ) -> Result, resource::CreateSamplerError> { + ) -> Result, resource::CreateSamplerError> { if desc .address_modes .iter() @@ -1270,25 +1230,23 @@ impl Device { let raw = unsafe { self.raw + .as_ref() + .unwrap() .create_sampler(&hal_desc) .map_err(DeviceError::from)? }; - Ok(resource::Sampler { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + Ok(Sampler { + raw: Some(raw), + device: self.clone(), + info: ResourceInfo::new(desc.label.borrow_or_default()), comparison: desc.compare.is_some(), filtering: desc.min_filter == wgt::FilterMode::Linear || desc.mag_filter == wgt::FilterMode::Linear, }) } - pub(super) fn create_shader_module<'a>( - &self, - self_id: id::DeviceId, + pub(crate) fn create_shader_module<'a>( + self: &Arc, desc: &pipeline::ShaderModuleDescriptor<'a>, source: pipeline::ShaderModuleSource<'a>, ) -> Result, pipeline::CreateShaderModuleError> { @@ -1423,7 +1381,12 @@ impl Device { label: desc.label.to_hal(self.instance_flags), runtime_checks: desc.shader_bound_checks.runtime_checks(), }; - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { + let raw = match unsafe { + self.raw + .as_ref() + .unwrap() + .create_shader_module(&hal_desc, hal_shader) + } { Ok(raw) => raw, Err(error) => { return Err(match error { @@ -1439,21 +1402,18 @@ impl Device { }; Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + raw: Some(raw), + device: self.clone(), interface: Some(interface), + info: ResourceInfo::new(desc.label.borrow_or_default()), #[cfg(debug_assertions)] label: desc.label.borrow_or_default().to_string(), }) } #[allow(unused_unsafe)] - pub(super) unsafe fn create_shader_module_spirv<'a>( - &self, - self_id: id::DeviceId, + pub(crate) unsafe fn create_shader_module_spirv<'a>( + self: &Arc, desc: &pipeline::ShaderModuleDescriptor<'a>, source: &'a [u32], ) -> Result, pipeline::CreateShaderModuleError> { @@ -1463,7 +1423,12 @@ impl Device { runtime_checks: desc.shader_bound_checks.runtime_checks(), }; let hal_shader = hal::ShaderInput::SpirV(source); - let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } { + let raw = match unsafe { + self.raw + .as_ref() + .unwrap() + .create_shader_module(&hal_desc, hal_shader) + } { Ok(raw) => raw, Err(error) => { return Err(match error { @@ -1479,70 +1444,53 @@ impl Device { }; Ok(pipeline::ShaderModule { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + raw: Some(raw), + device: self.clone(), 
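The constructors in this file now take `self: &Arc<Self>` so every created resource can store a strong `device` back-reference, replacing the old `Stored { value, ref_count }` pairs. Reduced to its essentials:

    use std::sync::Arc;

    struct Device;

    struct Sampler {
        device: Arc<Device>, // keeps the device alive as long as the sampler
    }

    impl Device {
        fn create_sampler(self: &Arc<Self>) -> Sampler {
            Sampler {
                device: Arc::clone(self),
            }
        }
    }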
interface: None, + info: ResourceInfo::new(desc.label.borrow_or_default()), #[cfg(debug_assertions)] label: desc.label.borrow_or_default().to_string(), }) } - pub(super) fn deduplicate_bind_group_layout( - self_id: id::DeviceId, - entry_map: &binding_model::BindEntryMap, - guard: &Storage, id::BindGroupLayoutId>, - ) -> Option { + pub(crate) fn deduplicate_bind_group_layout<'a>( + self: &Arc, + entry_map: &'a binding_model::BindEntryMap, + guard: &'a Storage, id::BindGroupLayoutId>, + ) -> Option<(id::BindGroupLayoutId, Arc>)> { guard - .iter(self_id.backend()) + .iter(self.as_info().id().backend()) .find(|&(_, bgl)| { - bgl.device_id.value.0 == self_id - && bgl - .as_inner() - .map_or(false, |inner| inner.entries == *entry_map) - }) - .map(|(id, value)| { - value.multi_ref_count.inc(); - id + bgl.device.info.id() == self.as_info().id() && bgl.entries == *entry_map }) + .map(|(id, resource)| (id, resource.clone())) } - fn get_introspection_bind_group_layouts<'a>( - pipeline_layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, + pub(crate) fn get_introspection_bind_group_layouts<'a>( + pipeline_layout: &'a binding_model::PipelineLayout, ) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> { pipeline_layout - .bind_group_layout_ids + .bind_group_layouts .iter() - .map(|&id| { - &get_bind_group_layout(bgl_guard, id) - .1 - .assume_deduplicated() - .entries - }) + .map(|layout| &layout.entries) .collect() } /// Generate information about late-validated buffer bindings for pipelines. //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? - fn make_late_sized_buffer_groups<'a>( + pub(crate) fn make_late_sized_buffer_groups( shader_binding_sizes: &FastHashMap, layout: &binding_model::PipelineLayout, - bgl_guard: &'a Storage, id::BindGroupLayoutId>, ) -> ArrayVec { // Given the shader-required binding sizes and the pipeline layout, // return the filtered list of them in the layout order, // removing those with given `min_binding_size`. layout - .bind_group_layout_ids + .bind_group_layouts .iter() .enumerate() - .map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup { - shader_sizes: get_bind_group_layout(bgl_guard, bgl_id) - .1 - .assume_deduplicated() + .map(|(group_index, bgl)| pipeline::LateSizedBufferGroup { + shader_sizes: bgl .entries .values() .filter_map(|entry| match entry.ty { @@ -1565,12 +1513,11 @@ impl Device { .collect() } - pub(super) fn create_bind_group_layout( - &self, - self_id: id::DeviceId, + pub(crate) fn create_bind_group_layout( + self: &Arc, label: &crate::Label, entry_map: binding_model::BindEntryMap, - ) -> Result, binding_model::CreateBindGroupLayoutError> { + ) -> Result, binding_model::CreateBindGroupLayoutError> { #[derive(PartialEq)] enum WritableStorage { Yes, @@ -1623,7 +1570,8 @@ impl Device { } => { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, + error: + BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled, }); } Bt::Texture { .. 
} => ( @@ -1639,7 +1587,7 @@ impl Device { wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureCube, + error: BindGroupLayoutEntryError::StorageTextureCube, }) } _ => (), @@ -1653,7 +1601,7 @@ impl Device { { return Err(binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, - error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite, + error: BindGroupLayoutEntryError::StorageTextureReadWrite, }); } _ => (), @@ -1683,7 +1631,7 @@ impl Device { // Validate the count parameter if entry.count.is_some() { required_features |= array_feature - .ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported) + .ok_or(BindGroupLayoutEntryError::ArrayUnsupported) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, @@ -1715,13 +1663,13 @@ impl Device { } self.require_features(required_features) - .map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures) + .map_err(BindGroupLayoutEntryError::MissingFeatures) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, })?; self.require_downlevel_flags(required_downlevel_flags) - .map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags) + .map_err(BindGroupLayoutEntryError::MissingDownlevelFlags) .map_err(|error| binding_model::CreateBindGroupLayoutError::Entry { binding: entry.binding, error, @@ -1731,14 +1679,17 @@ impl Device { let bgl_flags = conv::bind_group_layout_flags(self.features); let mut hal_bindings = entry_map.values().cloned().collect::>(); + let label = label.to_hal(self.instance_flags); hal_bindings.sort_by_key(|b| b.binding); let hal_desc = hal::BindGroupLayoutDescriptor { - label: label.to_hal(self.instance_flags), + label, flags: bgl_flags, entries: &hal_bindings, }; let raw = unsafe { self.raw + .as_ref() + .unwrap() .create_bind_group_layout(&hal_desc) .map_err(DeviceError::from)? 
}; @@ -1753,32 +1704,26 @@ impl Device { .validate(&self.limits) .map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?; - Ok(binding_model::BindGroupLayout { - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - multi_ref_count: MultiRefCount::new(), - inner: BglOrDuplicate::Inner(BindGroupLayoutInner { - raw, - dynamic_count: entry_map - .values() - .filter(|b| b.ty.has_dynamic_offset()) - .count(), - count_validator, - entries: entry_map, - #[cfg(debug_assertions)] - label: label.borrow_or_default().to_string(), - }), + Ok(BindGroupLayout { + raw: Some(raw), + device: self.clone(), + info: ResourceInfo::new(label.unwrap_or("")), + dynamic_count: entry_map + .values() + .filter(|b| b.ty.has_dynamic_offset()) + .count(), + count_validator, + entries: entry_map, + #[cfg(debug_assertions)] + label: label.unwrap_or_default().to_string(), }) } - fn create_buffer_binding<'a>( - device_id: id::DeviceId, + pub(crate) fn create_buffer_binding<'a>( bb: &binding_model::BufferBinding, binding: u32, decl: &wgt::BindGroupLayoutEntry, - used_buffer_ranges: &mut Vec, + used_buffer_ranges: &mut Vec>, dynamic_binding_info: &mut Vec, late_buffer_binding_sizes: &mut FastHashMap, used: &mut BindGroupStates, @@ -1833,10 +1778,6 @@ impl Device { .add_single(storage, bb.buffer_id, internal_use) .ok_or(Error::InvalidBuffer(bb.buffer_id))?; - if buffer.device_id.value.0 != device_id { - return Err(DeviceError::WrongDevice.into()); - } - check_buffer_usage(buffer.usage, pub_usage)?; let raw_buffer = buffer .raw @@ -1893,8 +1834,8 @@ impl Device { } assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); - used_buffer_ranges.extend(buffer.initialization_status.create_action( - bb.buffer_id, + used_buffer_ranges.extend(buffer.initialization_status.read().create_action( + buffer, bb.offset..bb.offset + bind_size, MemoryInitKind::NeedsInitializedMemory, )); @@ -1906,38 +1847,36 @@ impl Device { }) } - fn create_texture_binding( - device_id: id::DeviceId, - view: &resource::TextureView, - texture_guard: &Storage, id::TextureId>, + pub(crate) fn create_texture_binding( + view: &TextureView, internal_use: hal::TextureUses, pub_usage: wgt::TextureUsages, used: &mut BindGroupStates, - used_texture_ranges: &mut Vec, + used_texture_ranges: &mut Vec>, ) -> Result<(), binding_model::CreateBindGroupError> { + let texture = view.parent.read(); + let texture_id = texture.as_ref().unwrap().as_info().id(); // Careful here: the texture may no longer have its own ref count, // if it was deleted by the user. let texture = used .textures .add_single( - texture_guard, - view.parent_id.value.0, - view.parent_id.ref_count.clone(), + texture.as_ref().unwrap(), Some(view.selector.clone()), internal_use, ) .ok_or(binding_model::CreateBindGroupError::InvalidTexture( - view.parent_id.value.0, + texture_id, ))?; - if texture.device_id.value.0 != device_id { + if texture.device.as_info().id() != view.device.as_info().id() { return Err(DeviceError::WrongDevice.into()); } check_texture_usage(texture.desc.usage, pub_usage)?; used_texture_ranges.push(TextureInitTrackerAction { - id: view.parent_id.value.0, + texture: texture.clone(), range: TextureInitRange { mip_range: view.desc.range.mip_range(texture.desc.mip_level_count), layer_range: view @@ -1953,21 +1892,18 @@ impl Device { // This function expects the provided bind group layout to be resolved // (not passing a duplicate) beforehand. 
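Deduplication now just compares entry maps and clones the existing `Arc`, instead of bumping a bespoke `MultiRefCount`. A sketch over a flat id-to-layout list, with illustrative types:

    use std::{collections::BTreeMap, sync::Arc};

    type EntryMap = BTreeMap<u32, u64>; // stand-in for BindEntryMap

    struct BindGroupLayout {
        entries: EntryMap,
    }

    fn deduplicate(
        storage: &[(u64, Arc<BindGroupLayout>)],
        entry_map: &EntryMap,
    ) -> Option<(u64, Arc<BindGroupLayout>)> {
        storage
            .iter()
            .find(|(_, layout)| layout.entries == *entry_map)
            .map(|(id, layout)| (*id, Arc::clone(layout)))
    }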
- pub(super) fn create_bind_group( - &self, - self_id: id::DeviceId, - layout: &binding_model::BindGroupLayout, - layout_id: id::Valid, + pub(crate) fn create_bind_group( + self: &Arc, + layout: &Arc>, desc: &binding_model::BindGroupDescriptor, - hub: &Hub, - token: &mut Token>, + hub: &Hub, ) -> Result, binding_model::CreateBindGroupError> { use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error}; { // Check that the number of entries in the descriptor matches // the number of entries in the layout. let actual = desc.entries.len(); - let expected = layout.assume_deduplicated().entries.len(); + let expected = layout.entries.len(); if actual != expected { return Err(Error::BindingsNumMismatch { expected, actual }); } @@ -1983,10 +1919,9 @@ impl Device { // fill out the descriptors let mut used = BindGroupStates::new(); - let (buffer_guard, mut token) = hub.buffers.read(token); - let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token - let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); - let (sampler_guard, _) = hub.samplers.read(&mut token); + let buffer_guard = hub.buffers.read(); + let texture_view_guard = hub.texture_views.read(); + let sampler_guard = hub.samplers.read(); let mut used_buffer_ranges = Vec::new(); let mut used_texture_ranges = Vec::new(); @@ -1998,14 +1933,12 @@ impl Device { let binding = entry.binding; // Find the corresponding declaration in the layout let decl = layout - .assume_deduplicated() .entries .get(&binding) .ok_or(Error::MissingBindingDeclaration(binding))?; let (res_index, count) = match entry.resource { Br::Buffer(ref bb) => { let bb = Self::create_buffer_binding( - self_id, bb, binding, decl, @@ -2028,7 +1961,6 @@ impl Device { let res_index = hal_buffers.len(); for bb in bindings_array.iter() { let bb = Self::create_buffer_binding( - self_id, bb, binding, decl, @@ -2051,7 +1983,7 @@ impl Device { .add_single(&*sampler_guard, id) .ok_or(Error::InvalidSampler(id))?; - if sampler.device_id.value.0 != self_id { + if sampler.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } @@ -2081,7 +2013,7 @@ impl Device { } let res_index = hal_samplers.len(); - hal_samplers.push(&sampler.raw); + hal_samplers.push(sampler.raw()); (res_index, 1) } _ => { @@ -2103,12 +2035,10 @@ impl Device { .samplers .add_single(&*sampler_guard, id) .ok_or(Error::InvalidSampler(id))?; - - if sampler.device_id.value.0 != self_id { + if sampler.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } - - hal_samplers.push(&sampler.raw); + hal_samplers.push(sampler.raw()); } (res_index, num_bindings) @@ -2125,9 +2055,7 @@ impl Device { "SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture", )?; Self::create_texture_binding( - self_id, view, - &texture_guard, internal_use, pub_usage, &mut used, @@ -2135,7 +2063,7 @@ impl Device { )?; let res_index = hal_textures.len(); hal_textures.push(hal::TextureBinding { - view: &view.raw, + view: view.raw(), usage: internal_use, }); (res_index, 1) @@ -2154,16 +2082,14 @@ impl Device { Self::texture_use_parameters(binding, decl, view, "SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?; Self::create_texture_binding( - self_id, view, - &texture_guard, internal_use, pub_usage, &mut used, &mut used_texture_ranges, )?; hal_textures.push(hal::TextureBinding { - view: &view.raw, + view: view.raw(), usage: internal_use, }); } @@ -2187,12 +2113,9 @@ impl Device { return 
Err(Error::DuplicateBinding(a.binding)); } } - - let layout_inner = layout.assume_deduplicated(); - let hal_desc = hal::BindGroupDescriptor { label: desc.label.to_hal(self.instance_flags), - layout: &layout_inner.raw, + layout: layout.raw(), entries: &hal_entries, buffers: &hal_buffers, samplers: &hal_samplers, @@ -2200,27 +2123,23 @@ impl Device { }; let raw = unsafe { self.raw + .as_ref() + .unwrap() .create_bind_group(&hal_desc) .map_err(DeviceError::from)? }; - // manually add a dependency on BGL - layout.multi_ref_count.inc(); - Ok(binding_model::BindGroup { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - layout_id, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + raw: Some(raw), + device: self.clone(), + layout: layout.clone(), + info: ResourceInfo::new(desc.label.borrow_or_default()), used, - used_buffer_ranges, - used_texture_ranges, - dynamic_binding_info, + used_buffer_ranges: RwLock::new(used_buffer_ranges), + used_texture_ranges: RwLock::new(used_texture_ranges), + dynamic_binding_info: RwLock::new(dynamic_binding_info), // collect in the order of BGL iteration - late_buffer_binding_sizes: layout_inner + late_buffer_binding_sizes: layout .entries .keys() .flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned()) @@ -2228,7 +2147,7 @@ impl Device { }) } - fn check_array_binding( + pub(crate) fn check_array_binding( features: wgt::Features, count: Option, num_bindings: usize, @@ -2261,10 +2180,10 @@ impl Device { Ok(()) } - fn texture_use_parameters( + pub(crate) fn texture_use_parameters( binding: u32, decl: &wgt::BindGroupLayoutEntry, - view: &crate::resource::TextureView, + view: &TextureView, expected: &'static str, ) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> { use crate::binding_model::CreateBindGroupError as Error; @@ -2391,11 +2310,10 @@ impl Device { } } - pub(super) fn create_pipeline_layout( - &self, - self_id: id::DeviceId, + pub(crate) fn create_pipeline_layout( + self: &Arc, desc: &binding_model::PipelineLayoutDescriptor, - bgl_guard: &Storage, id::BindGroupLayoutId>, + bgl_guard: &Storage, id::BindGroupLayoutId>, ) -> Result, binding_model::CreatePipelineLayoutError> { use crate::binding_model::CreatePipelineLayoutError as Error; @@ -2450,15 +2368,15 @@ impl Device { // validate total resource counts for &id in desc.bind_group_layouts.iter() { - let Some(bind_group_layout) = try_get_bind_group_layout(bgl_guard, id) else { + let Ok(bind_group_layout) = bgl_guard.get(id) else { return Err(Error::InvalidBindGroupLayout(id)); }; - if bind_group_layout.device_id.value.0 != self_id { + if bind_group_layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } - count_validator.merge(&bind_group_layout.assume_deduplicated().count_validator); + count_validator.merge(&bind_group_layout.count_validator); } count_validator .validate(&self.limits) @@ -2467,12 +2385,7 @@ impl Device { let bgl_vec = desc .bind_group_layouts .iter() - .map(|&id| { - &try_get_bind_group_layout(bgl_guard, id) - .unwrap() - .assume_deduplicated() - .raw - }) + .map(|&id| bgl_guard.get(id).unwrap().raw()) .collect::>(); let hal_desc = hal::PipelineLayoutDescriptor { label: desc.label.to_hal(self.instance_flags), @@ -2483,26 +2396,20 @@ impl Device { let raw = unsafe { self.raw + .as_ref() + .unwrap() .create_pipeline_layout(&hal_desc) .map_err(DeviceError::from)? 
}; Ok(binding_model::PipelineLayout { - raw, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), - bind_group_layout_ids: desc + raw: Some(raw), + device: self.clone(), + info: ResourceInfo::new(desc.label.borrow_or_default()), + bind_group_layouts: desc .bind_group_layouts .iter() - .map(|&id| { - // manually add a dependency to BGL - let (id, layout) = get_bind_group_layout(bgl_guard, id::Valid(id)); - layout.multi_ref_count.inc(); - id - }) + .map(|&id| bgl_guard.get(id).unwrap().clone()) .collect(), push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(), }) @@ -2510,13 +2417,12 @@ impl Device { //TODO: refactor this. It's the only method of `Device` that registers new objects // (the pipeline layout). - fn derive_pipeline_layout( - &self, - self_id: id::DeviceId, + pub(crate) fn derive_pipeline_layout( + self: &Arc, implicit_context: Option, mut derived_group_layouts: ArrayVec, - bgl_guard: &mut Storage, id::BindGroupLayoutId>, - pipeline_layout_guard: &mut Storage, id::PipelineLayoutId>, + bgl_registry: &Registry>, + pipeline_layout_registry: &Registry>, ) -> Result { while derived_group_layouts .last() @@ -2536,15 +2442,16 @@ impl Device { } for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) { - match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) { - Some(dedup_id) => { + let bgl = match self.deduplicate_bind_group_layout(&map, &bgl_registry.read()) { + Some((dedup_id, _)) => { *bgl_id = dedup_id; + None } - None => { - let bgl = self.create_bind_group_layout(self_id, &None, map)?; - bgl_guard.force_replace(*bgl_id, bgl); - } + None => Some(self.create_bind_group_layout(&None, map)?), }; + if let Some(bgl) = bgl { + bgl_registry.force_replace(*bgl_id, bgl); + } } let layout_desc = binding_model::PipelineLayoutDescriptor { @@ -2552,27 +2459,23 @@ impl Device { bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]), push_constant_ranges: Cow::Borrowed(&[]), //TODO? }; - let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?; - pipeline_layout_guard.force_replace(ids.root_id, layout); + let layout = self.create_pipeline_layout(&layout_desc, &bgl_registry.read())?; + pipeline_layout_registry.force_replace(ids.root_id, layout); Ok(ids.root_id) } - pub(super) fn create_compute_pipeline( - &self, - self_id: id::DeviceId, + pub(crate) fn create_compute_pipeline( + self: &Arc, desc: &pipeline::ComputePipelineDescriptor, implicit_context: Option, - hub: &Hub, - token: &mut Token, + hub: &Hub, ) -> Result, pipeline::CreateComputePipelineError> { - //TODO: only lock mutable if the layout is derived - let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token); - let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token); - // This has to be done first, or otherwise the IDs may be pointing to entries // that are not even in the storage. 
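`PipelineLayout` now stores `Arc<BindGroupLayout>` values rather than ids, so the layouts' lifetimes follow ownership and the manual "add a dependency" ref-count calls disappear. In miniature:

    use std::sync::Arc;

    struct BindGroupLayout;

    struct PipelineLayout {
        bind_group_layouts: Vec<Arc<BindGroupLayout>>,
    }

    fn create_pipeline_layout(bgls: &[Arc<BindGroupLayout>]) -> PipelineLayout {
        PipelineLayout {
            // Cloning the Arcs is the whole dependency-tracking story now:
            // each layout lives at least as long as this pipeline layout.
            bind_group_layouts: bgls.iter().map(Arc::clone).collect(),
        }
    }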
if let Some(ref ids) = implicit_context { + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); + let mut bgl_guard = hub.bind_group_layouts.write(); for &bgl_id in ids.group_ids.iter() { bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); } @@ -2585,24 +2488,24 @@ impl Device { let mut shader_binding_sizes = FastHashMap::default(); let io = validation::StageIo::default(); - let (shader_module_guard, _) = hub.shader_modules.read(&mut token); - let shader_module = shader_module_guard + let shader_module = hub + .shader_modules .get(desc.stage.module) .map_err(|_| validation::StageError::InvalidModule)?; - if shader_module.device_id.value.0 != self_id { + if shader_module.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } { let flag = wgt::ShaderStages::COMPUTE; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( pipeline_layout_guard .get(pipeline_layout_id) .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?, - &*bgl_guard, )), None => { for _ in 0..self.limits.max_bind_groups { @@ -2627,82 +2530,79 @@ impl Device { let pipeline_layout_id = match desc.layout { Some(id) => id, None => self.derive_pipeline_layout( - self_id, implicit_context, derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, + &hub.bind_group_layouts, + &hub.pipeline_layouts, )?, }; + let pipeline_layout_guard = hub.pipeline_layouts.read(); let layout = pipeline_layout_guard .get(pipeline_layout_id) .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?; - if layout.device_id.value.0 != self_id { + if layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout); let pipeline_desc = hal::ComputePipelineDescriptor { label: desc.label.to_hal(self.instance_flags), - layout: &layout.raw, + layout: layout.raw(), stage: hal::ProgrammableStage { entry_point: desc.stage.entry_point.as_ref(), - module: &shader_module.raw, + module: shader_module.raw(), }, }; - let raw = - unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err( - |err| match err { - hal::PipelineError::Device(error) => { - pipeline::CreateComputePipelineError::Device(error.into()) - } - hal::PipelineError::Linkage(_stages, msg) => { - pipeline::CreateComputePipelineError::Internal(msg) - } - hal::PipelineError::EntryPoint(_stage) => { - pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string()) - } - }, - )?; + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_compute_pipeline(&pipeline_desc) + } + .map_err(|err| match err { + hal::PipelineError::Device(error) => { + pipeline::CreateComputePipelineError::Device(error.into()) + } + hal::PipelineError::Linkage(_stages, msg) => { + pipeline::CreateComputePipelineError::Internal(msg) + } + hal::PipelineError::EntryPoint(_stage) => { + pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string()) + } + })?; let pipeline = pipeline::ComputePipeline { - raw, - layout_id: Stored { - value: id::Valid(pipeline_layout_id), - ref_count: layout.life_guard.add_ref(), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + 
raw: Some(raw), + layout: layout.clone(), + device: self.clone(), + _shader_module: shader_module, late_sized_buffer_groups, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + info: ResourceInfo::new(desc.label.borrow_or_default()), }; Ok(pipeline) } - pub(super) fn create_render_pipeline( - &self, - self_id: id::DeviceId, + pub(crate) fn create_render_pipeline( + self: &Arc, adapter: &Adapter, desc: &pipeline::RenderPipelineDescriptor, implicit_context: Option, - hub: &Hub, - token: &mut Token, + hub: &Hub, ) -> Result, pipeline::CreateRenderPipelineError> { use wgt::TextureFormatFeatureFlags as Tfff; - //TODO: only lock mutable if the layout is derived - let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token); - let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token); + let mut shader_modules = Vec::new(); // This has to be done first, or otherwise the IDs may be pointing to entries // that are not even in the storage. if let Some(ref ids) = implicit_context { + //TODO: only lock mutable if the layout is derived + let mut pipeline_layout_guard = hub.pipeline_layouts.write(); + let mut bgl_guard = hub.bind_group_layouts.write(); pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE); for &bgl_id in ids.group_ids.iter() { bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE); @@ -2737,7 +2637,7 @@ impl Device { .iter() .any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend) } { - log::info!("Color targets: {:?}", color_targets); + log::debug!("Color targets: {:?}", color_targets); self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?; } @@ -2992,7 +2892,7 @@ impl Device { sc }; - let (shader_module_guard, _) = hub.shader_modules.read(&mut token); + let shader_module_guard = hub.shader_modules.read(); let vertex_stage = { let stage = &desc.vertex.stage; @@ -3004,10 +2904,12 @@ impl Device { error: validation::StageError::InvalidModule, } })?; - - if shader_module.device_id.value.0 != self_id { + if shader_module.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } + shader_modules.push(shader_module.clone()); + + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => { @@ -3015,13 +2917,12 @@ impl Device { .get(pipeline_layout_id) .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; - if pipeline_layout.device_id.value.0 != self_id { + if pipeline_layout.device.as_info().id() != self.as_info().id() { return Err(DeviceError::WrongDevice.into()); } Some(Device::get_introspection_bind_group_layouts( pipeline_layout, - &*bgl_guard, )) } None => None, @@ -3046,7 +2947,7 @@ impl Device { } hal::ProgrammableStage { - module: &shader_module.raw, + module: shader_module.raw(), entry_point: stage.entry_point.as_ref(), } }; @@ -3062,13 +2963,15 @@ impl Device { stage: flag, error: validation::StageError::InvalidModule, })?; + shader_modules.push(shader_module.clone()); + let pipeline_layout_guard = hub.pipeline_layouts.read(); let provided_layouts = match desc.layout { Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts( pipeline_layout_guard .get(pipeline_layout_id) + .as_ref() .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?, - &*bgl_guard, )), None => None, }; @@ -3103,7 +3006,7 @@ impl Device { } Some(hal::ProgrammableStage { - module: &shader_module.raw, + module: shader_module.raw(), entry_point: fragment.stage.entry_point.as_ref(), }) } 
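
The hunks above show the core of the arcanization pattern: instead of `Stored` ids plus `LifeGuard` reference counts, each resource now holds `Arc` clones of everything it depends on (device, layout, shader modules), and cross-device misuse is caught by comparing `as_info().id()` values. Below is a minimal sketch of that ownership shape; the types and field names are simplified stand-ins, not wgpu-core's real API.

```rust
use std::sync::Arc;

// Hypothetical, simplified stand-ins for wgpu-core's types.
struct ResourceInfo {
    id: u64, // stands in for the packed (index, epoch, backend) id
}

struct Device {
    info: ResourceInfo,
}

struct RenderPipeline {
    device: Arc<Device>, // an Arc clone keeps the device alive
}

impl RenderPipeline {
    fn new(device: &Arc<Device>) -> Self {
        // Cloning the Arc replaces the old LifeGuard/RefCount bookkeeping.
        Self {
            device: Arc::clone(device),
        }
    }

    fn check_same_device(&self, other: &Device) -> Result<(), &'static str> {
        // Mirrors the `as_info().id()` comparisons in the hunks above.
        if self.device.info.id != other.info.id {
            return Err("WrongDevice");
        }
        Ok(())
    }
}

fn main() {
    let device = Arc::new(Device {
        info: ResourceInfo { id: 1 },
    });
    let pipeline = RenderPipeline::new(&device);
    drop(device); // the pipeline's clone still keeps the device alive
    assert!(pipeline.check_same_device(&pipeline.device).is_ok());
}
```

Dropping the pipeline releases its strong references; this is why the later hunks can retire explicit `dispose` calls and let refcounts drive teardown.
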
@@ -3138,7 +3041,7 @@ impl Device { )?; } _ => { - log::info!( + log::warn!( "The fragment stage {:?} output @location({}) values are ignored", fragment_stage .as_ref() @@ -3160,16 +3063,19 @@ impl Device { let pipeline_layout_id = match desc.layout { Some(id) => id, None => self.derive_pipeline_layout( - self_id, implicit_context, derived_group_layouts, - &mut *bgl_guard, - &mut *pipeline_layout_guard, + &hub.bind_group_layouts, + &hub.pipeline_layouts, )?, }; - let layout = pipeline_layout_guard - .get(pipeline_layout_id) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?; + let layout = { + let pipeline_layout_guard = hub.pipeline_layouts.read(); + pipeline_layout_guard + .get(pipeline_layout_id) + .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)? + .clone() + }; // Multiview is only supported if the feature is enabled if desc.multiview.is_some() { @@ -3193,11 +3099,11 @@ impl Device { } let late_sized_buffer_groups = - Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard); + Device::make_late_sized_buffer_groups(&shader_binding_sizes, &layout); let pipeline_desc = hal::RenderPipelineDescriptor { label: desc.label.to_hal(self.instance_flags), - layout: &layout.raw, + layout: layout.raw(), vertex_buffers: &vertex_buffers, vertex_stage, primitive: desc.primitive, @@ -3207,23 +3113,26 @@ impl Device { color_targets, multiview: desc.multiview, }; - let raw = - unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err( - |err| match err { - hal::PipelineError::Device(error) => { - pipeline::CreateRenderPipelineError::Device(error.into()) - } - hal::PipelineError::Linkage(stage, msg) => { - pipeline::CreateRenderPipelineError::Internal { stage, error: msg } - } - hal::PipelineError::EntryPoint(stage) => { - pipeline::CreateRenderPipelineError::Internal { - stage: hal::auxil::map_naga_stage(stage), - error: EP_FAILURE.to_string(), - } - } - }, - )?; + let raw = unsafe { + self.raw + .as_ref() + .unwrap() + .create_render_pipeline(&pipeline_desc) + } + .map_err(|err| match err { + hal::PipelineError::Device(error) => { + pipeline::CreateRenderPipelineError::Device(error.into()) + } + hal::PipelineError::Linkage(stage, msg) => { + pipeline::CreateRenderPipelineError::Internal { stage, error: msg } + } + hal::PipelineError::EntryPoint(stage) => { + pipeline::CreateRenderPipelineError::Internal { + stage: hal::auxil::map_naga_stage(stage), + error: EP_FAILURE.to_string(), + } + } + })?; let pass_context = RenderPassContext { attachments: AttachmentData { @@ -3259,26 +3168,21 @@ impl Device { } let pipeline = pipeline::RenderPipeline { - raw, - layout_id: Stored { - value: id::Valid(pipeline_layout_id), - ref_count: layout.life_guard.add_ref(), - }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + raw: Some(raw), + layout: layout.clone(), + device: self.clone(), pass_context, + _shader_modules: shader_modules, flags, strip_index_format: desc.primitive.strip_index_format, vertex_steps, late_sized_buffer_groups, - life_guard: LifeGuard::new(desc.label.borrow_or_default()), + info: ResourceInfo::new(desc.label.borrow_or_default()), }; Ok(pipeline) } - pub(super) fn describe_format_features( + pub(crate) fn describe_format_features( &self, adapter: &Adapter, format: TextureFormat, @@ -3299,26 +3203,33 @@ impl Device { } } - pub(super) fn wait_for_submit( + pub(crate) fn wait_for_submit( &self, submission_index: SubmissionIndex, - token: &mut Token, ) -> Result<(), WaitIdleError> { + 
let guard = self.fence.read(); + let fence = guard.as_ref().unwrap(); let last_done_index = unsafe { self.raw - .get_fence_value(&self.fence) + .as_ref() + .unwrap() + .get_fence_value(fence) .map_err(DeviceError::from)? }; if last_done_index < submission_index { log::info!("Waiting for submission {:?}", submission_index); unsafe { self.raw - .wait(&self.fence, submission_index, !0) + .as_ref() + .unwrap() + .wait(fence, submission_index, !0) .map_err(DeviceError::from)? }; - let closures = self - .lock_life(token) - .triage_submissions(submission_index, &self.command_allocator); + drop(guard); + let closures = self.lock_life().triage_submissions( + submission_index, + self.command_allocator.lock().as_mut().unwrap(), + ); assert!( closures.is_empty(), "wait_for_submit is not expected to work with closures" @@ -3327,11 +3238,10 @@ impl Device { Ok(()) } - pub(super) fn create_query_set( - &self, - self_id: id::DeviceId, + pub(crate) fn create_query_set( + self: &Arc, desc: &resource::QuerySetDescriptor, - ) -> Result, resource::CreateQuerySetError> { + ) -> Result, resource::CreateQuerySetError> { use resource::CreateQuerySetError as Error; match desc.ty { @@ -3356,32 +3266,24 @@ impl Device { } let hal_desc = desc.map_label(|label| label.to_hal(self.instance_flags)); - Ok(resource::QuerySet { - raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() }, - device_id: Stored { - value: id::Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, - life_guard: LifeGuard::new(""), + Ok(QuerySet { + raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }), + device: self.clone(), + info: ResourceInfo::new(""), desc: desc.map_label(|_| ()), }) } - pub(crate) fn lose<'this, 'token: 'this>( - &'this mut self, - token: &mut Token<'token, Self>, - message: &str, - ) { + pub(crate) fn lose(&self, message: &str) { // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device. // Mark the device explicitly as invalid. This is checked in various // places to prevent new work from being submitted. - self.valid = false; + self.valid.store(false, Ordering::Release); // 1) Resolve the GPUDevice device.lost promise. - let mut life_tracker = self.lock_life(token); - if life_tracker.device_lost_closure.is_some() { - let device_lost_closure = life_tracker.device_lost_closure.take().unwrap(); + let closure = self.lock_life().device_lost_closure.take(); + if let Some(device_lost_closure) = closure { device_lost_closure.call(DeviceLostReason::Unknown, message.to_string()); } @@ -3396,55 +3298,54 @@ impl Device { } impl Device { - pub(crate) fn destroy_buffer(&self, buffer: Buffer) { - if let Some(raw) = buffer.raw { - unsafe { - self.raw.destroy_buffer(raw); - } - } - } - - pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.into_baked(); + pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { + let mut baked = cmd_buf.extract_baked_commands(); unsafe { baked.encoder.reset_all(baked.list.into_iter()); } unsafe { - self.raw.destroy_command_encoder(baked.encoder); + self.raw + .as_ref() + .unwrap() + .destroy_command_encoder(baked.encoder); } } /// Wait for idle and remove resources that we can, before we die. 
- pub(crate) fn prepare_to_die(&mut self) { - self.pending_writes.deactivate(); - let mut life_tracker = self.life_tracker.lock(); - let current_index = self.active_submission_index; - if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } { + pub(crate) fn prepare_to_die(&self) { + self.pending_writes.lock().as_mut().unwrap().deactivate(); + let current_index = self.active_submission_index.load(Ordering::Relaxed); + if let Err(error) = unsafe { + let fence = self.fence.read(); + let fence = fence.as_ref().unwrap(); + self.raw + .as_ref() + .unwrap() + .wait(fence, current_index, CLEANUP_WAIT_MS) + } { log::error!("failed to wait for the device: {:?}", error); } - let _ = life_tracker.triage_submissions(current_index, &self.command_allocator); - life_tracker.cleanup(&self.raw); + let mut life_tracker = self.lock_life(); + let _ = life_tracker.triage_submissions( + current_index, + self.command_allocator.lock().as_mut().unwrap(), + ); + life_tracker.cleanup(); #[cfg(feature = "trace")] { - self.trace = None; - } - } - - pub(crate) fn dispose(self) { - self.pending_writes.dispose(&self.raw); - self.command_allocator.into_inner().dispose(&self.raw); - unsafe { - self.raw.destroy_buffer(self.zero_buffer); - self.raw.destroy_fence(self.fence); - self.raw.exit(self.queue); + *self.trace.lock() = None; } } } -impl crate::resource::Resource for Device { - const TYPE: &'static str = "Device"; +impl Resource for Device { + const TYPE: ResourceType = "Device"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } diff --git a/third_party/rust/wgpu-core/src/error.rs b/third_party/rust/wgpu-core/src/error.rs index 62ecebe9b93c3..aa9797fd42dfb 100644 --- a/third_party/rust/wgpu-core/src/error.rs +++ b/third_party/rust/wgpu-core/src/error.rs @@ -17,7 +17,7 @@ impl<'a> ErrorFormatter<'a> { writeln!(self.writer, " note: {note}").expect("Error formatting error"); } - pub fn label(&mut self, label_key: &str, label_value: &str) { + pub fn label(&mut self, label_key: &str, label_value: &String) { if !label_key.is_empty() && !label_value.is_empty() { self.note(&format!("{label_key} = `{label_value}`")); } diff --git a/third_party/rust/wgpu-core/src/global.rs b/third_party/rust/wgpu-core/src/global.rs index e3ed2be761034..3cb0e84b2b190 100644 --- a/third_party/rust/wgpu-core/src/global.rs +++ b/third_party/rust/wgpu-core/src/global.rs @@ -1,16 +1,20 @@ +use std::{marker::PhantomData, sync::Arc}; + +use wgt::Backend; + use crate::{ hal_api::HalApi, hub::{HubReport, Hubs}, - id, + id::SurfaceId, identity::GlobalIdentityHandlerFactory, instance::{Instance, Surface}, - registry::Registry, - storage::{Element, StorageReport}, + registry::{Registry, RegistryReport}, + storage::Element, }; -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct GlobalReport { - pub surfaces: StorageReport, + pub surfaces: RegistryReport, #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] pub vulkan: Option, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] @@ -23,10 +27,32 @@ pub struct GlobalReport { pub gl: Option, } +impl GlobalReport { + pub fn surfaces(&self) -> &RegistryReport { + &self.surfaces + } + pub fn hub_report(&self, backend: Backend) -> &HubReport { + match backend { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => self.vulkan.as_ref().unwrap(), + #[cfg(all(feature = "metal", 
any(target_os = "macos", target_os = "ios")))] + Backend::Metal => self.metal.as_ref().unwrap(), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => self.dx12.as_ref().unwrap(), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => self.dx11.as_ref().unwrap(), + #[cfg(feature = "gles")] + Backend::Gl => self.gl.as_ref().unwrap(), + _ => panic!("HubReport is not supported on this backend"), + } + } +} + pub struct Global { pub instance: Instance, - pub surfaces: Registry, - pub(crate) hubs: Hubs, + pub surfaces: Registry, + pub(crate) hubs: Hubs, + _phantom: PhantomData, } impl Global { @@ -34,8 +60,9 @@ impl Global { profiling::scope!("Global::new"); Self { instance: Instance::new(name, instance_desc), - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } @@ -50,8 +77,9 @@ impl Global { profiling::scope!("Global::new"); Self { instance: A::create_instance_from_hal(name, hal_instance), - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } @@ -69,21 +97,22 @@ impl Global { profiling::scope!("Global::new"); Self { instance, - surfaces: Registry::without_backend(&factory, "Surface"), + surfaces: Registry::without_backend(&factory), hubs: Hubs::new(&factory), + _phantom: PhantomData, } } pub fn clear_backend(&self, _dummy: ()) { - let mut surface_guard = self.surfaces.data.write(); let hub = A::hub(self); + let surfaces_locked = self.surfaces.read(); // this is used for tests, which keep the adapter - hub.clear(&mut surface_guard, false); + hub.clear(&surfaces_locked, false); } pub fn generate_report(&self) -> GlobalReport { GlobalReport { - surfaces: self.surfaces.data.read().generate_report(), + surfaces: self.surfaces.generate_report(), #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: if self.instance.vulkan.is_some() { Some(self.hubs.vulkan.generate_report()) @@ -121,35 +150,39 @@ impl Global { impl Drop for Global { fn drop(&mut self) { profiling::scope!("Global::drop"); - log::info!("Dropping Global"); - let mut surface_guard = self.surfaces.data.write(); + log::info!("Destroying Global"); + let mut surfaces_locked = self.surfaces.write(); // destroy hubs before the instance gets dropped #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] { - self.hubs.vulkan.clear(&mut surface_guard, true); + self.hubs.vulkan.clear(&surfaces_locked, true); } #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] { - self.hubs.metal.clear(&mut surface_guard, true); + self.hubs.metal.clear(&surfaces_locked, true); } #[cfg(all(feature = "dx12", windows))] { - self.hubs.dx12.clear(&mut surface_guard, true); + self.hubs.dx12.clear(&surfaces_locked, true); } #[cfg(all(feature = "dx11", windows))] { - self.hubs.dx11.clear(&mut surface_guard, true); + self.hubs.dx11.clear(&surfaces_locked, true); } #[cfg(feature = "gles")] { - self.hubs.gl.clear(&mut surface_guard, true); + self.hubs.gl.clear(&surfaces_locked, true); } // destroy surfaces - for element in surface_guard.map.drain(..) { - if let Element::Occupied(surface, _) = element { - self.instance.destroy_surface(surface); + for element in surfaces_locked.map.drain(..) 
{
+            if let Element::Occupied(arc_surface, _) = element {
+                if let Ok(surface) = Arc::try_unwrap(arc_surface) {
+                    self.instance.destroy_surface(surface);
+                } else {
+                    panic!("Surface cannot be destroyed because it is still in use");
+                }
             }
         }
     }
diff --git a/third_party/rust/wgpu-core/src/hal_api.rs b/third_party/rust/wgpu-core/src/hal_api.rs
index 00180d8ac5197..cb3e25b09e2a6 100644
--- a/third_party/rust/wgpu-core/src/hal_api.rs
+++ b/third_party/rust/wgpu-core/src/hal_api.rs
@@ -1,4 +1,4 @@
-use wgt::Backend;
+use wgt::{Backend, WasmNotSendSync};
 
 use crate::{
     global::Global,
@@ -7,13 +7,12 @@ use crate::{
     instance::{HalSurface, Instance, Surface},
 };
 
-pub trait HalApi: hal::Api {
+pub trait HalApi: hal::Api + 'static + WasmNotSendSync {
     const VARIANT: Backend;
     fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance;
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>;
-    fn hub(global: &Global) -> &Hub;
+    fn hub(global: &Global) -> &Hub;
     fn get_surface(surface: &Surface) -> Option<&HalSurface>;
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface>;
 }
 
 impl HalApi for hal::api::Empty {
@@ -24,15 +23,12 @@ impl HalApi for hal::api::Empty {
     fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> {
         unimplemented!("called empty api")
     }
-    fn hub(_: &Global) -> &Hub {
+    fn hub(_: &Global) -> &Hub {
        unimplemented!("called empty api")
     }
     fn get_surface(_: &Surface) -> Option<&HalSurface> {
         unimplemented!("called empty api")
     }
-    fn get_surface_mut(_: &mut Surface) -> Option<&mut HalSurface> {
-        unimplemented!("called empty api")
-    }
 }
 
 #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
@@ -48,14 +44,11 @@ impl HalApi for hal::api::Vulkan {
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
         instance.vulkan.as_ref()
     }
-    fn hub(global: &Global) -> &Hub {
+    fn hub(global: &Global) -> &Hub {
         &global.hubs.vulkan
     }
     fn get_surface(surface: &Surface) -> Option<&HalSurface> {
-        surface.vulkan.as_ref()
-    }
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> {
-        surface.vulkan.as_mut()
+        surface.raw.downcast_ref()
     }
 }
 
@@ -72,14 +65,11 @@ impl HalApi for hal::api::Metal {
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
         instance.metal.as_ref()
     }
-    fn hub(global: &Global) -> &Hub {
+    fn hub(global: &Global) -> &Hub {
         &global.hubs.metal
     }
     fn get_surface(surface: &Surface) -> Option<&HalSurface> {
-        surface.metal.as_ref()
-    }
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> {
-        surface.metal.as_mut()
+        surface.raw.downcast_ref()
     }
 }
 
@@ -96,14 +86,11 @@ impl HalApi for hal::api::Dx12 {
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
         instance.dx12.as_ref()
     }
-    fn hub(global: &Global) -> &Hub {
+    fn hub(global: &Global) -> &Hub {
         &global.hubs.dx12
     }
     fn get_surface(surface: &Surface) -> Option<&HalSurface> {
-        surface.dx12.as_ref()
-    }
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> {
-        surface.dx12.as_mut()
+        surface.raw.downcast_ref()
     }
 }
 
@@ -120,14 +107,11 @@ impl HalApi for hal::api::Dx11 {
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
         instance.dx11.as_ref()
     }
-    fn hub(global: &Global) -> &Hub {
+    fn hub(global: &Global) -> &Hub {
         &global.hubs.dx11
    }
     fn get_surface(surface: &Surface) -> Option<&HalSurface> {
-        surface.dx11.as_ref()
-    }
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> {
-        surface.dx11.as_mut()
+        surface.raw.downcast_ref()
     }
 }
 
@@ -145,13 +129,10 @@ impl HalApi for hal::api::Gles {
     fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
         instance.gl.as_ref()
     }
-    fn hub(global: &Global) -> &Hub {
+    fn hub(global: &Global) -> &Hub {
         &global.hubs.gl
     }
     fn get_surface(surface: &Surface) -> Option<&HalSurface> {
-        surface.gl.as_ref()
-    }
-    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface> {
-        surface.gl.as_mut()
+        surface.raw.downcast_ref()
     }
 }
diff --git a/third_party/rust/wgpu-core/src/hub.rs b/third_party/rust/wgpu-core/src/hub.rs
index 6445a328ed3f6..6cb9ee64d119f 100644
--- a/third_party/rust/wgpu-core/src/hub.rs
+++ b/third_party/rust/wgpu-core/src/hub.rs
@@ -7,7 +7,7 @@ resources of type `R`. For example, [`id::DeviceId`] is an alias for
 of course `Debug`.
 
 Each `Id` contains not only an index for the resource it denotes but
-also a [`Backend`] indicating which `wgpu` backend it belongs to. You
+also a Backend indicating which `wgpu` backend it belongs to. You
 can use the [`gfx_select`] macro to dynamically dispatch on an id's
 backend to a function specialized at compile time for a specific
 backend. See that macro's documentation for details.
@@ -57,54 +57,15 @@ itself to choose ids always pass `()`.
 In either case, the id ultimately assigned is returned as the first
 element of the tuple.
 
 Producing true identifiers from `id_in` values is the job of an
-[`IdentityHandler<I>`] implementation, which has an associated type
-[`Input`] saying what type of `id_in` values it accepts, and a
-[`process`] method that turns such values into true identifiers of
-type `I`. There are two kinds of `IdentityHandler<I>`s:
-
-- Users that want `wgpu_core` to assign ids generally use
-  [`IdentityManager`] ([wrapped in a mutex]). Its `Input` type is
-  `()`, and it tracks assigned ids and generation numbers as
-  necessary. (This is what `wgpu` does.)
-
-- Users that want to assign ids themselves use an `IdentityHandler<I>`
-  whose `Input` type is `I` itself, and whose `process` method simply
-  passes the `id_in` argument through unchanged. For example, the
-  `player` crate uses an `IdentityPassThrough` type whose `process`
-  method simply adjusts the id's backend (since recordings can be
-  replayed on a different backend than the one they were created on)
-  but passes the rest of the id's content through unchanged.
-
-Because an `IdentityHandler<I>` can only create ids for a single
-resource type `I`, constructing a [`Global`] entails constructing a
-separate `IdentityHandler<I>` for each resource type `I` that the
-`Global` will manage: an `IdentityHandler<DeviceId>`, an
-`IdentityHandler<TextureId>`, and so on.
-
-The [`Global::new`] function could simply take a large collection of
-`IdentityHandler<I>` implementations as arguments, but that would be
-ungainly. Instead, `Global::new` expects a `factory` argument that
+[`crate::identity::IdentityManager`], but only if the `IdentityHandlerFactory`
+creates it; such ids are generated by wgpu, otherwise ids are received from outside.
+
+`Global::new` expects a `factory` argument that
 implements the [`GlobalIdentityHandlerFactory`] trait, which extends
-[`IdentityHandlerFactory`] for each resource id type `I`. This
+[`crate::identity::IdentityHandlerFactory`] for each resource id type `I`. This
 trait, in turn, has a `spawn` method that constructs an
-`IdentityHandler<I>` for the `Global` to use.
-
-What this means is that the types of resource creation functions'
-`id_in` arguments depend on the `Global`'s `G` type parameter. A
-`Global<G>`'s `IdentityHandler<I>` implementation is:
-
-```ignore
-<G as IdentityHandlerFactory<I>>::Filter
-```
-
-where `Filter` is an associated type of the `IdentityHandlerFactory` trait.
-Thus, its `id_in` type is:
-
-```ignore
-<<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input
-```
-
-The [`Input`] type is an alias for this construction.
+`crate::identity::IdentityManager` for the `Global` to use when ids
+should be generated by wgpu, or returns `None` when ids are supplied from outside.
 
 ## Id allocation and streaming
 
@@ -140,240 +101,48 @@ creation fails, the id supplied for that resource is marked to
 indicate as much, allowing subsequent operations using that id to be
 properly flagged as errors as well.
 
-[`Backend`]: wgt::Backend
-[`Global`]: crate::global::Global
-[`Global::new`]: crate::global::Global::new
 [`gfx_select`]: crate::gfx_select
-[`IdentityHandler`]: crate::identity::IdentityHandler
-[`Input`]: crate::identity::IdentityHandler::Input
-[`process`]: crate::identity::IdentityHandler::process
+[`Input`]: crate::identity::IdentityHandlerFactory::Input
+[`process`]: crate::identity::IdentityManager::process
 [`Id`]: crate::id::Id
-[wrapped in a mutex]: ../identity/trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E
+[wrapped in a mutex]: trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E
 [WebGPU]: https://www.w3.org/TR/webgpu/
-[`IdentityManager`]: crate::identity::IdentityManager
-[`Input`]: crate::identity::Input
-[`IdentityHandlerFactory`]: crate::identity::IdentityHandlerFactory
+
 */
 
 use crate::{
     binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
     command::{CommandBuffer, RenderBundle},
-    device::Device,
+    device::{queue::Queue, Device},
     hal_api::HalApi,
     id,
     identity::GlobalIdentityHandlerFactory,
-    instance::{Adapter, HalSurface, Instance, Surface},
+    instance::{Adapter, HalSurface, Surface},
     pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
-    registry::Registry,
-    resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureClearMode, TextureView},
-    storage::{Element, Storage, StorageReport},
+    registry::{Registry, RegistryReport},
+    resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView},
+    storage::{Element, Storage},
 };
+use std::fmt::Debug;
 
-use wgt::{strict_assert_eq, strict_assert_ne};
-
-#[cfg(any(debug_assertions, feature = "strict_asserts"))]
-use std::cell::Cell;
-use std::{fmt::Debug, marker::PhantomData};
-
-/// Type system for enforcing the lock order on [`Hub`] fields.
-///
-/// If type `A` implements `Access<B>`, that means we are allowed to
-/// proceed with locking resource `B` after we lock `A`.
-///
-/// The implementations of `Access` basically describe the edges in an
-/// acyclic directed graph of lock transitions. As long as it doesn't have
-/// cycles, any number of threads can acquire locks along paths through
-/// the graph without deadlock. That is, if you look at each thread's
-/// lock acquisitions as steps along a path in the graph, then because
-/// there are no cycles in the graph, there must always be some thread
-/// that is able to acquire its next lock, or that is about to release
-/// a lock. (Assume that no thread just sits on its locks forever.)
-/// -/// Locks must be acquired in the following order: -/// -/// - [`Adapter`] -/// - [`Device`] -/// - [`CommandBuffer`] -/// - [`RenderBundle`] -/// - [`PipelineLayout`] -/// - [`BindGroupLayout`] -/// - [`BindGroup`] -/// - [`ComputePipeline`] -/// - [`RenderPipeline`] -/// - [`ShaderModule`] -/// - [`Buffer`] -/// - [`StagingBuffer`] -/// - [`Texture`] -/// - [`TextureView`] -/// - [`Sampler`] -/// - [`QuerySet`] -/// -/// That is, you may only acquire a new lock on a `Hub` field if it -/// appears in the list after all the other fields you're already -/// holding locks for. When you are holding no locks, you can start -/// anywhere. -/// -/// It's fine to add more `Access` implementations as needed, as long -/// as you do not introduce a cycle. In other words, as long as there -/// is some ordering you can put the resource types in that respects -/// the extant `Access` implementations, that's fine. -/// -/// See the documentation for [`Hub`] for more details. -pub trait Access {} - -pub enum Root {} - -// These impls are arranged so that the target types (that is, the `T` -// in `Access`) appear in locking order. -// -// TODO: establish an order instead of declaring all the pairs. -impl Access for Root {} -impl Access for Root {} -impl Access for Instance {} -impl Access> for Root {} -impl Access> for Surface {} -impl Access> for Root {} -impl Access> for Surface {} -impl Access> for Adapter {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Device {} -impl Access> for CommandBuffer {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for RenderBundle {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for PipelineLayout {} -impl Access> for QuerySet {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for PipelineLayout {} -impl Access> for CommandBuffer {} -impl Access> for Device {} -impl Access> for BindGroup {} -impl Access> for Device {} -impl Access> for BindGroup {} -impl Access> for ComputePipeline {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for BindGroupLayout {} -impl Access> for BindGroup {} -impl Access> for CommandBuffer {} -impl Access> for ComputePipeline {} -impl Access> for RenderPipeline {} -impl Access> for QuerySet {} -impl Access> for Device {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Buffer {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for Texture {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for TextureView {} -impl Access> for Root {} -impl Access> for Device {} -impl Access> for CommandBuffer {} -impl Access> for RenderPipeline {} -impl Access> for ComputePipeline {} -impl Access> for Sampler {} - -#[cfg(any(debug_assertions, feature = "strict_asserts"))] -thread_local! { - /// Per-thread state checking `Token` creation in debug builds. - /// - /// This is the number of `Token` values alive on the current - /// thread. Since `Token` creation respects the [`Access`] graph, - /// there can never be more tokens alive than there are fields of - /// [`Hub`], so a `u8` is plenty. - static ACTIVE_TOKEN: Cell = Cell::new(0); -} - -/// A zero-size permission token to lock some fields of [`Hub`]. 
-/// -/// Access to a `Token` grants permission to lock any field of -/// [`Hub`] following the one of type [`Registry`], where -/// "following" is as defined by the [`Access`] implementations. -/// -/// Calling [`Token::root()`] returns a `Token`, which grants -/// permission to lock any field. Dynamic checks ensure that each -/// thread has at most one `Token` live at a time, in debug -/// builds. -/// -/// The locking methods on `Registry` take a `&'t mut -/// Token`, and return a fresh `Token<'t, T>` and a lock guard with -/// lifetime `'t`, so the caller cannot access their `Token` again -/// until they have dropped both the `Token` and the lock guard. -/// -/// Tokens are `!Send`, so one thread can't send its permissions to -/// another. -pub(crate) struct Token<'a, T: 'a> { - // The `*const` makes us `!Send` and `!Sync`. - level: PhantomData<&'a *const T>, -} - -impl<'a, T> Token<'a, T> { - /// Return a new token for a locked field. - /// - /// This should only be used by `Registry` locking methods. - pub(crate) fn new() -> Self { - #[cfg(any(debug_assertions, feature = "strict_asserts"))] - ACTIVE_TOKEN.with(|active| { - let old = active.get(); - strict_assert_ne!(old, 0, "Root token was dropped"); - active.set(old + 1); - }); - Self { level: PhantomData } - } -} - -impl Token<'static, Root> { - /// Return a `Token`, granting permission to lock any [`Hub`] field. - /// - /// Debug builds check dynamically that each thread has at most - /// one root token at a time. - pub fn root() -> Self { - #[cfg(any(debug_assertions, feature = "strict_asserts"))] - ACTIVE_TOKEN.with(|active| { - strict_assert_eq!(0, active.replace(1), "Root token is already active"); - }); - - Self { level: PhantomData } - } -} - -impl<'a, T> Drop for Token<'a, T> { - fn drop(&mut self) { - #[cfg(any(debug_assertions, feature = "strict_asserts"))] - ACTIVE_TOKEN.with(|active| { - let old = active.get(); - active.set(old - 1); - }); - } -} - -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct HubReport { - pub adapters: StorageReport, - pub devices: StorageReport, - pub pipeline_layouts: StorageReport, - pub shader_modules: StorageReport, - pub bind_group_layouts: StorageReport, - pub bind_groups: StorageReport, - pub command_buffers: StorageReport, - pub render_bundles: StorageReport, - pub render_pipelines: StorageReport, - pub compute_pipelines: StorageReport, - pub query_sets: StorageReport, - pub buffers: StorageReport, - pub textures: StorageReport, - pub texture_views: StorageReport, - pub samplers: StorageReport, + pub adapters: RegistryReport, + pub devices: RegistryReport, + pub queues: RegistryReport, + pub pipeline_layouts: RegistryReport, + pub shader_modules: RegistryReport, + pub bind_group_layouts: RegistryReport, + pub bind_groups: RegistryReport, + pub command_buffers: RegistryReport, + pub render_bundles: RegistryReport, + pub render_pipelines: RegistryReport, + pub compute_pipelines: RegistryReport, + pub query_sets: RegistryReport, + pub buffers: RegistryReport, + pub textures: RegistryReport, + pub texture_views: RegistryReport, + pub samplers: RegistryReport, } impl HubReport { @@ -383,7 +152,7 @@ impl HubReport { } #[allow(rustdoc::private_intra_doc_links)] -/// All the resources for a particular backend in a [`Global`]. +/// All the resources for a particular backend in a [`crate::global::Global`]. /// /// To obtain `global`'s `Hub` for some [`HalApi`] backend type `A`, /// call [`A::hub(global)`]. 
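
The `## Locking` notes rewritten in the next hunk replace the old token-and-lock-order system with a simpler rule: each `Registry` keeps `Arc`ed resources behind its own `RwLock`, and that lock is held only while a resource is being looked up. A rough sketch of the pattern, with made-up names and a plain `HashMap` standing in for wgpu-core's `Storage` (which also tracks epochs and error placeholders):

```rust
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

struct Registry<T> {
    storage: RwLock<HashMap<usize, Arc<T>>>,
}

impl<T> Registry<T> {
    fn new() -> Self {
        Self {
            storage: RwLock::new(HashMap::new()),
        }
    }

    // The read lock is held only long enough to clone the Arc; the
    // caller then uses the resource without holding any registry lock.
    fn get(&self, id: usize) -> Option<Arc<T>> {
        self.storage.read().unwrap().get(&id).cloned()
    }

    // A short exclusive lock installs (or replaces) an entry.
    fn force_replace(&self, id: usize, value: T) {
        self.storage.write().unwrap().insert(id, Arc::new(value));
    }
}

fn main() {
    let registry = Registry::new();
    registry.force_replace(0, "buffer");
    let buffer = registry.get(0).unwrap(); // lock already released here
    assert_eq!(*buffer, "buffer");
}
```

The trade-off, as the docs note, is contention on the registry lock during lookups, but callers no longer hold any `Hub`-wide lock while working with a resource.
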
@@ -391,87 +160,48 @@ impl HubReport {
 /// ## Locking
 ///
 /// Each field in `Hub` is a [`Registry`] holding all the values of a
-/// particular type of resource, all protected by a single [`RwLock`].
+/// particular type of resource, all protected by a single RwLock.
 /// So for example, to access any [`Buffer`], you must acquire a read
-/// lock on the `Hub`s entire [`buffers`] registry. The lock guard
+/// lock on the `Hub`s entire buffers registry. The lock guard
 /// gives you access to the `Registry`'s [`Storage`], which you can
 /// then index with the buffer's id. (Yes, this design causes
 /// contention; see [#2272].)
 ///
 /// But most `wgpu` operations require access to several different
 /// kinds of resource, so you often need to hold locks on several
-/// different fields of your [`Hub`] simultaneously. To avoid
-/// deadlock, there is an ordering imposed on the fields, and you may
-/// only acquire new locks on fields that come *after* all those you
-/// are already holding locks on, in this ordering. (The ordering is
-/// described in the documentation for the [`Access`] trait.)
-///
-/// We use Rust's type system to statically check that `wgpu_core` can
-/// only ever acquire locks in the correct order:
-///
-/// - A value of type [`Token<T>`] represents proof that the owner
-///   only holds locks on the `Hub` fields holding resources of type
-///   `T` or earlier in the lock ordering. A special value of type
-///   `Token<Root>`, obtained by calling [`Token::root`], represents
-///   proof that no `Hub` field locks are held.
-///
-/// - To lock the `Hub` field holding resources of type `T`, you must
-///   call its [`read`] or [`write`] methods. These require you to
-///   pass in a `&mut Token<A>`, for some `A` that implements
-///   [`Access<T>`]. This implementation exists only if `T` follows `A`
-///   in the field ordering, which statically ensures that you are
-///   indeed allowed to lock this new `Hub` field.
-///
-/// - The locking methods return both an [`RwLock`] guard that you can
-///   use to access the field's resources, and a new `Token<T>` value.
-///   These both borrow from the lifetime of your `Token<A>`, so since
-///   you passed that by mutable reference, you cannot access it again
-///   until you drop the new token and lock guard.
+/// different fields of your [`Hub`] simultaneously.
 ///
-/// Because a thread only ever has access to the `Token<T>` for the
-/// last resource type `T` it holds a lock for, and the `Access` trait
-/// implementations only permit acquiring locks for types `U` that
-/// follow `T` in the lock ordering, it is statically impossible for a
-/// program to violate the locking order.
+/// Inside the `Registry` there are `Arc<T>` values, where `T` is a resource;
+/// the `Registry` is locked only while accessing a specific resource.
 ///
-/// This does assume that threads cannot call `Token<Root>` when they
-/// already hold locks (dynamically enforced in debug builds) and that
-/// threads cannot send their `Token`s to other threads (enforced by
-/// making `Token` neither `Send` nor `Sync`).
/// -/// [`Global`]: crate::global::Global /// [`A::hub(global)`]: HalApi::hub -/// [`RwLock`]: parking_lot::RwLock -/// [`buffers`]: Hub::buffers -/// [`read`]: Registry::read -/// [`write`]: Registry::write -/// [`Token`]: Token -/// [`Access`]: Access -/// [#2272]: https://github.com/gfx-rs/wgpu/pull/2272 -pub struct Hub { - pub adapters: Registry, id::AdapterId, F>, - pub devices: Registry, id::DeviceId, F>, - pub pipeline_layouts: Registry, id::PipelineLayoutId, F>, - pub shader_modules: Registry, id::ShaderModuleId, F>, - pub bind_group_layouts: Registry, id::BindGroupLayoutId, F>, - pub bind_groups: Registry, id::BindGroupId, F>, - pub command_buffers: Registry, id::CommandBufferId, F>, - pub render_bundles: Registry, id::RenderBundleId, F>, - pub render_pipelines: Registry, id::RenderPipelineId, F>, - pub compute_pipelines: Registry, id::ComputePipelineId, F>, - pub query_sets: Registry, id::QuerySetId, F>, - pub buffers: Registry, id::BufferId, F>, - pub staging_buffers: Registry, id::StagingBufferId, F>, - pub textures: Registry, id::TextureId, F>, - pub texture_views: Registry, id::TextureViewId, F>, - pub samplers: Registry, id::SamplerId, F>, +pub struct Hub { + pub adapters: Registry>, + pub devices: Registry>, + pub queues: Registry>, + pub pipeline_layouts: Registry>, + pub shader_modules: Registry>, + pub bind_group_layouts: Registry>, + pub bind_groups: Registry>, + pub command_buffers: Registry>, + pub render_bundles: Registry>, + pub render_pipelines: Registry>, + pub compute_pipelines: Registry>, + pub query_sets: Registry>, + pub buffers: Registry>, + pub staging_buffers: Registry>, + pub textures: Registry>, + pub texture_views: Registry>, + pub samplers: Registry>, } -impl Hub { - fn new(factory: &F) -> Self { +impl Hub { + fn new(factory: &F) -> Self { Self { adapters: Registry::new(A::VARIANT, factory), devices: Registry::new(A::VARIANT, factory), + queues: Registry::new(A::VARIANT, factory), pipeline_layouts: Registry::new(A::VARIANT, factory), shader_modules: Registry::new(A::VARIANT, factory), bind_group_layouts: Registry::new(A::VARIANT, factory), @@ -494,209 +224,94 @@ impl Hub { // everything related to a logical device. pub(crate) fn clear( &self, - surface_guard: &mut Storage, + surface_guard: &Storage, with_adapters: bool, ) { - use crate::resource::TextureInner; - use hal::{Device as _, Surface as _}; + use hal::Surface; - let mut devices = self.devices.data.write(); - for element in devices.map.iter_mut() { - if let Element::Occupied(ref mut device, _) = *element { + let mut devices = self.devices.write(); + for element in devices.map.iter() { + if let Element::Occupied(ref device, _) = *element { device.prepare_to_die(); } } - // destroy command buffers first, since otherwise DX12 isn't happy - for element in self.command_buffers.data.write().map.drain(..) { - if let Element::Occupied(command_buffer, _) = element { - let device = &devices[command_buffer.device_id.value]; - device.destroy_command_buffer(command_buffer); - } - } - - for element in self.samplers.data.write().map.drain(..) { - if let Element::Occupied(sampler, _) = element { - unsafe { - devices[sampler.device_id.value] - .raw - .destroy_sampler(sampler.raw); - } - } - } - - for element in self.texture_views.data.write().map.drain(..) { - if let Element::Occupied(texture_view, _) = element { - let device = &devices[texture_view.device_id.value]; - unsafe { - device.raw.destroy_texture_view(texture_view.raw); - } - } - } - - for element in self.textures.data.write().map.drain(..) 
{ - if let Element::Occupied(texture, _) = element { - let device = &devices[texture.device_id.value]; - if let TextureInner::Native { raw: Some(raw) } = texture.inner { - unsafe { - device.raw.destroy_texture(raw); - } - } - if let TextureClearMode::RenderPass { clear_views, .. } = texture.clear_mode { - for view in clear_views { + self.command_buffers.write().map.clear(); + self.samplers.write().map.clear(); + self.texture_views.write().map.clear(); + self.textures.write().map.clear(); + self.buffers.write().map.clear(); + self.bind_groups.write().map.clear(); + self.shader_modules.write().map.clear(); + self.bind_group_layouts.write().map.clear(); + self.pipeline_layouts.write().map.clear(); + self.compute_pipelines.write().map.clear(); + self.render_pipelines.write().map.clear(); + self.query_sets.write().map.clear(); + + for element in surface_guard.map.iter() { + if let Element::Occupied(ref surface, _epoch) = *element { + if let Some(ref mut present) = surface.presentation.lock().take() { + if let Some(device) = present.device.downcast_ref::() { + let suf = A::get_surface(surface); unsafe { - device.raw.destroy_texture_view(view); + suf.unwrap().raw.unconfigure(device.raw()); + //TODO: we could destroy the surface here } } } } } - for element in self.buffers.data.write().map.drain(..) { - if let Element::Occupied(buffer, _) = element { - //TODO: unmap if needed - devices[buffer.device_id.value].destroy_buffer(buffer); - } - } - for element in self.bind_groups.data.write().map.drain(..) { - if let Element::Occupied(bind_group, _) = element { - let device = &devices[bind_group.device_id.value]; - unsafe { - device.raw.destroy_bind_group(bind_group.raw); - } - } - } - for element in self.shader_modules.data.write().map.drain(..) { - if let Element::Occupied(module, _) = element { - let device = &devices[module.device_id.value]; - unsafe { - device.raw.destroy_shader_module(module.raw); - } - } - } - for element in self.bind_group_layouts.data.write().map.drain(..) { - if let Element::Occupied(bgl, _) = element { - let device = &devices[bgl.device_id.value]; - if let Some(inner) = bgl.into_inner() { - unsafe { - device.raw.destroy_bind_group_layout(inner.raw); - } - } - } - } - for element in self.pipeline_layouts.data.write().map.drain(..) { - if let Element::Occupied(pipeline_layout, _) = element { - let device = &devices[pipeline_layout.device_id.value]; - unsafe { - device.raw.destroy_pipeline_layout(pipeline_layout.raw); - } - } - } - for element in self.compute_pipelines.data.write().map.drain(..) { - if let Element::Occupied(pipeline, _) = element { - let device = &devices[pipeline.device_id.value]; - unsafe { - device.raw.destroy_compute_pipeline(pipeline.raw); - } - } - } - for element in self.render_pipelines.data.write().map.drain(..) { - if let Element::Occupied(pipeline, _) = element { - let device = &devices[pipeline.device_id.value]; - unsafe { - device.raw.destroy_render_pipeline(pipeline.raw); - } - } - } - - for element in surface_guard.map.iter_mut() { - if let Element::Occupied(ref mut surface, _epoch) = *element { - if surface - .presentation - .as_ref() - .map_or(wgt::Backend::Empty, |p| p.backend()) - != A::VARIANT - { - continue; - } - if let Some(present) = surface.presentation.take() { - let device = &devices[present.device_id.value]; - let suf = A::get_surface_mut(surface); - unsafe { - suf.unwrap().raw.unconfigure(&device.raw); - //TODO: we could destroy the surface here - } - } - } - } - - for element in self.query_sets.data.write().map.drain(..) 
{ - if let Element::Occupied(query_set, _) = element { - let device = &devices[query_set.device_id.value]; - unsafe { - device.raw.destroy_query_set(query_set.raw); - } - } - } - - for element in devices.map.drain(..) { - if let Element::Occupied(device, _) = element { - device.dispose(); - } - } + self.queues.write().map.clear(); + devices.map.clear(); if with_adapters { drop(devices); - self.adapters.data.write().map.clear(); + self.adapters.write().map.clear(); } } - pub(crate) fn surface_unconfigure( - &self, - device_id: id::Valid, - surface: &mut HalSurface, - ) { - use hal::Surface as _; - - let devices = self.devices.data.read(); - let device = &devices[device_id]; + pub(crate) fn surface_unconfigure(&self, device: &Device, surface: &HalSurface) { unsafe { - surface.raw.unconfigure(&device.raw); + use hal::Surface; + surface.raw.unconfigure(device.raw()); } } pub fn generate_report(&self) -> HubReport { HubReport { - adapters: self.adapters.data.read().generate_report(), - devices: self.devices.data.read().generate_report(), - pipeline_layouts: self.pipeline_layouts.data.read().generate_report(), - shader_modules: self.shader_modules.data.read().generate_report(), - bind_group_layouts: self.bind_group_layouts.data.read().generate_report(), - bind_groups: self.bind_groups.data.read().generate_report(), - command_buffers: self.command_buffers.data.read().generate_report(), - render_bundles: self.render_bundles.data.read().generate_report(), - render_pipelines: self.render_pipelines.data.read().generate_report(), - compute_pipelines: self.compute_pipelines.data.read().generate_report(), - query_sets: self.query_sets.data.read().generate_report(), - buffers: self.buffers.data.read().generate_report(), - textures: self.textures.data.read().generate_report(), - texture_views: self.texture_views.data.read().generate_report(), - samplers: self.samplers.data.read().generate_report(), + adapters: self.adapters.generate_report(), + devices: self.devices.generate_report(), + queues: self.queues.generate_report(), + pipeline_layouts: self.pipeline_layouts.generate_report(), + shader_modules: self.shader_modules.generate_report(), + bind_group_layouts: self.bind_group_layouts.generate_report(), + bind_groups: self.bind_groups.generate_report(), + command_buffers: self.command_buffers.generate_report(), + render_bundles: self.render_bundles.generate_report(), + render_pipelines: self.render_pipelines.generate_report(), + compute_pipelines: self.compute_pipelines.generate_report(), + query_sets: self.query_sets.generate_report(), + buffers: self.buffers.generate_report(), + textures: self.textures.generate_report(), + texture_views: self.texture_views.generate_report(), + samplers: self.samplers.generate_report(), } } } -pub struct Hubs { +pub struct Hubs { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub(crate) vulkan: Hub, + pub(crate) vulkan: Hub, #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub(crate) metal: Hub, + pub(crate) metal: Hub, #[cfg(all(feature = "dx12", windows))] - pub(crate) dx12: Hub, + pub(crate) dx12: Hub, #[cfg(all(feature = "dx11", windows))] - pub(crate) dx11: Hub, + pub(crate) dx11: Hub, #[cfg(feature = "gles")] - pub(crate) gl: Hub, + pub(crate) gl: Hub, #[cfg(all( not(all(feature = "vulkan", not(target_arch = "wasm32"))), not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))), @@ -704,11 +319,11 @@ pub struct Hubs { not(all(feature = "dx11", windows)), not(feature = "gles"), ))] - pub(crate) empty: Hub, 
+ pub(crate) empty: Hub, } -impl Hubs { - pub(crate) fn new(factory: &F) -> Self { +impl Hubs { + pub(crate) fn new(factory: &F) -> Self { Self { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] vulkan: Hub::new(factory), diff --git a/third_party/rust/wgpu-core/src/id.rs b/third_party/rust/wgpu-core/src/id.rs index 2386bfb6e725d..97c119af5ce2d 100644 --- a/third_party/rust/wgpu-core/src/id.rs +++ b/third_party/rust/wgpu-core/src/id.rs @@ -1,6 +1,12 @@ use crate::{Epoch, Index}; -use std::{cmp::Ordering, fmt, marker::PhantomData}; -use wgt::Backend; +use std::{ + any::Any, + cmp::Ordering, + fmt::{self, Debug}, + hash::Hash, + marker::PhantomData, +}; +use wgt::{Backend, WasmNotSendSync}; #[cfg(feature = "id32")] type IdType = u32; @@ -66,7 +72,7 @@ type Dummy = hal::api::Empty; all(feature = "serde", not(feature = "replay")), derive(serde::Deserialize) )] -pub struct Id(NonZeroId, PhantomData); +pub struct Id(NonZeroId, PhantomData); // This type represents Id in a more readable (and editable) way. #[allow(dead_code)] @@ -77,14 +83,20 @@ enum SerialId { Id(Index, Epoch, Backend), } #[cfg(feature = "trace")] -impl From> for SerialId { +impl From> for SerialId +where + T: 'static + WasmNotSendSync, +{ fn from(id: Id) -> Self { let (index, epoch, backend) = id.unzip(); Self::Id(index, epoch, backend) } } #[cfg(feature = "replay")] -impl From for Id { +impl From for Id +where + T: 'static + WasmNotSendSync, +{ fn from(id: SerialId) -> Self { match id { SerialId::Id(index, epoch, backend) => TypedId::zip(index, epoch, backend), @@ -92,7 +104,10 @@ impl From for Id { } } -impl Id { +impl Id +where + T: 'static + WasmNotSendSync, +{ /// # Safety /// /// The raw id must be valid for the type. @@ -101,8 +116,13 @@ impl Id { } #[allow(dead_code)] - pub(crate) fn dummy(index: u32) -> Valid { - Valid(Id::zip(index, 1, Backend::Empty)) + pub(crate) fn dummy(index: u32) -> Self { + Id::zip(index, 1, Backend::Empty) + } + + #[allow(dead_code)] + pub(crate) fn is_valid(&self) -> bool { + self.backend() != Backend::Empty } pub fn backend(self) -> Backend { @@ -118,15 +138,21 @@ impl Id { } } -impl Copy for Id {} +impl Copy for Id where T: 'static + WasmNotSendSync {} -impl Clone for Id { +impl Clone for Id +where + T: 'static + WasmNotSendSync, +{ fn clone(&self) -> Self { *self } } -impl fmt::Debug for Id { +impl Debug for Id +where + T: 'static + WasmNotSendSync, +{ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { let (index, epoch, backend) = self.unzip(); formatter @@ -138,53 +164,60 @@ impl fmt::Debug for Id { } } -impl std::hash::Hash for Id { +impl Hash for Id +where + T: 'static + WasmNotSendSync, +{ fn hash(&self, state: &mut H) { self.0.hash(state); } } -impl PartialEq for Id { +impl PartialEq for Id +where + T: 'static + WasmNotSendSync, +{ fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } -impl Eq for Id {} +impl Eq for Id where T: 'static + WasmNotSendSync {} -impl PartialOrd for Id { +impl PartialOrd for Id +where + T: 'static + WasmNotSendSync, +{ fn partial_cmp(&self, other: &Self) -> Option { self.0.partial_cmp(&other.0) } } -impl Ord for Id { +impl Ord for Id +where + T: 'static + WasmNotSendSync, +{ fn cmp(&self, other: &Self) -> Ordering { self.0.cmp(&other.0) } } -/// An internal ID that has been checked to point to -/// a valid object in the storages. 
-#[repr(transparent)] -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] -#[cfg_attr(feature = "trace", derive(serde::Serialize))] -#[cfg_attr(feature = "replay", derive(serde::Deserialize))] -pub(crate) struct Valid(pub I); - /// Trait carrying methods for direct `Id` access. /// /// Most `wgpu-core` clients should not use this trait. Unusual clients that /// need to construct `Id` values directly, or access their components, like the /// WGPU recording player, may use this trait to do so. -pub trait TypedId: Copy { +pub trait TypedId: Copy + Debug + Any + 'static + WasmNotSendSync + Eq + Hash { fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self; fn unzip(self) -> (Index, Epoch, Backend); fn into_raw(self) -> NonZeroId; } #[allow(trivial_numeric_casts)] -impl TypedId for Id { +impl TypedId for Id +where + T: 'static + WasmNotSendSync, +{ fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self { assert_eq!(0, epoch >> EPOCH_BITS); assert_eq!(0, (index as IdType) >> INDEX_BITS); diff --git a/third_party/rust/wgpu-core/src/identity.rs b/third_party/rust/wgpu-core/src/identity.rs index fe10bedb0e015..3f421dd697a74 100644 --- a/third_party/rust/wgpu-core/src/identity.rs +++ b/third_party/rust/wgpu-core/src/identity.rs @@ -1,8 +1,11 @@ use parking_lot::Mutex; use wgt::Backend; -use crate::{id, Epoch, Index}; -use std::fmt::Debug; +use crate::{ + id::{self}, + Epoch, FastHashMap, Index, +}; +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; /// A simple structure to allocate [`Id`] identifiers. /// @@ -32,98 +35,99 @@ use std::fmt::Debug; /// [`alloc`]: IdentityManager::alloc /// [`free`]: IdentityManager::free #[derive(Debug, Default)] -pub struct IdentityManager { - /// Available index values. If empty, then `epochs.len()` is the next index - /// to allocate. - free: Vec, - - /// The next or currently-live epoch value associated with each `Id` index. - /// - /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any - /// id with the same index but an older epoch is dead. - /// - /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its - /// next `Id`. - epochs: Vec, +pub(super) struct IdentityValues { + free: Vec<(Index, Epoch)>, + //sorted by Index + used: FastHashMap>, + count: usize, } -impl IdentityManager { +impl IdentityValues { /// Allocate a fresh, never-before-seen id with the given `backend`. /// /// The backend is incorporated into the id, so that ids allocated with /// different `backend` values are always distinct. pub fn alloc(&mut self, backend: Backend) -> I { + self.count += 1; match self.free.pop() { - Some(index) => I::zip(index, self.epochs[index as usize], backend), + Some((index, epoch)) => I::zip(index, epoch + 1, backend), None => { let epoch = 1; - let id = I::zip(self.epochs.len() as Index, epoch, backend); - self.epochs.push(epoch); - id + let used = self.used.entry(epoch).or_insert_with(Default::default); + let index = if let Some(i) = used.iter().max_by_key(|v| *v) { + i + 1 + } else { + 0 + }; + used.push(index); + I::zip(index, epoch, backend) } } } - /// Free `id`. It will never be returned from `alloc` again. - pub fn free(&mut self, id: I) { + pub fn mark_as_used(&mut self, id: I) -> I { + self.count += 1; let (index, epoch, _backend) = id.unzip(); - let pe = &mut self.epochs[index as usize]; - assert_eq!(*pe, epoch); - // If the epoch reaches EOL, the index doesn't go - // into the free list, will never be reused again. 
- if epoch < id::EPOCH_MASK { - *pe = epoch + 1; - self.free.push(index); - } + let used = self.used.entry(epoch).or_insert_with(Default::default); + used.push(index); + id } -} -/// A type that can build true ids from proto-ids, and free true ids. -/// -/// For some implementations, the true id is based on the proto-id. -/// The caller is responsible for providing well-allocated proto-ids. -/// -/// For other implementations, the proto-id carries no information -/// (it's `()`, say), and this `IdentityHandler` type takes care of -/// allocating a fresh true id. -/// -/// See the module-level documentation for details. -pub trait IdentityHandler: Debug { - /// The type of proto-id consumed by this filter, to produce a true id. - type Input: Clone + Debug; + /// Free `id`. It will never be returned from `alloc` again. + pub fn release(&mut self, id: I) { + let (index, epoch, _backend) = id.unzip(); + self.free.push((index, epoch)); + self.count -= 1; + } - /// Given a proto-id value `id`, return a true id for `backend`. - fn process(&self, id: Self::Input, backend: Backend) -> I; + pub fn count(&self) -> usize { + self.count + } +} - /// Free the true id `id`. - fn free(&self, id: I); +#[derive(Debug)] +pub struct IdentityManager { + pub(super) values: Mutex, + _phantom: PhantomData, } -impl IdentityHandler for Mutex { - type Input = (); - fn process(&self, _id: Self::Input, backend: Backend) -> I { - self.lock().alloc(backend) +impl IdentityManager { + pub fn process(&self, backend: Backend) -> I { + self.values.lock().alloc(backend) + } + pub fn mark_as_used(&self, id: I) -> I { + self.values.lock().mark_as_used(id) } - fn free(&self, id: I) { - self.lock().free(id) + pub fn free(&self, id: I) { + self.values.lock().release(id) + } +} + +impl IdentityManager { + pub fn new() -> Self { + Self { + values: Mutex::new(IdentityValues::default()), + _phantom: PhantomData, + } } } -/// A type that can produce [`IdentityHandler`] filters for ids of type `I`. +/// A type that can produce [`IdentityManager`] filters for ids of type `I`. /// /// See the module-level documentation for details. -pub trait IdentityHandlerFactory { - /// The type of filter this factory constructs. - /// - /// "Filter" and "handler" seem to both mean the same thing here: - /// something that can produce true ids from proto-ids. - type Filter: IdentityHandler; - - /// Create an [`IdentityHandler`] implementation that can +pub trait IdentityHandlerFactory { + type Input: Copy; + /// Create an [`IdentityManager`] implementation that can /// transform proto-ids into ids of type `I`. + /// It can return None if ids are passed from outside + /// and are not generated by wgpu /// - /// [`IdentityHandler`]: IdentityHandler - fn spawn(&self) -> Self::Filter; + /// [`IdentityManager`]: IdentityManager + fn spawn(&self) -> Arc> { + Arc::new(IdentityManager::new()) + } + fn autogenerate_ids() -> bool; + fn input_to_id(id_in: Self::Input) -> I; } /// A global identity handler factory based on [`IdentityManager`]. 
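
The `IdentityValues` changes above recycle ids by index while bumping the epoch on every reuse, so a stale id (old epoch, reused index) can never alias a live resource. A minimal sketch of that allocate/release cycle, using a plain `(index, epoch)` tuple instead of wgpu-core's packed `Id` (and a simple counter in place of the per-epoch `used` map):

```rust
// Simplified model of the index/epoch recycling implemented above.
struct IdentityValues {
    free: Vec<(u32, u32)>, // (index, epoch) pairs returned by `release`
    next_index: u32,
}

impl IdentityValues {
    fn alloc(&mut self) -> (u32, u32) {
        match self.free.pop() {
            // Reusing an index bumps its epoch, so stale ids are detectable.
            Some((index, epoch)) => (index, epoch + 1),
            None => {
                let index = self.next_index;
                self.next_index += 1;
                (index, 1)
            }
        }
    }

    fn release(&mut self, id: (u32, u32)) {
        self.free.push(id);
    }
}

fn main() {
    let mut ids = IdentityValues { free: Vec::new(), next_index: 0 };
    let a = ids.alloc(); // (0, 1)
    ids.release(a);
    let b = ids.alloc(); // (0, 2): same index, newer epoch
    assert_eq!(b, (a.0, a.1 + 1));
}
```

wgpu-core additionally packs index, epoch, and backend into a single `NonZeroId` (see the `id.rs` hunks above), but the recycling rule it relies on is the same, as the `test_epoch_end_of_life` test in the next hunk demonstrates.
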
@@ -134,14 +138,18 @@ pub trait IdentityHandlerFactory<I> {
 #[derive(Debug)]
 pub struct IdentityManagerFactory;
 
-impl<I: id::TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
-    type Filter = Mutex<IdentityManager>;
-    fn spawn(&self) -> Self::Filter {
-        Mutex::new(IdentityManager::default())
+impl<I: id::TypedId> IdentityHandlerFactory<I> for IdentityManagerFactory {
+    type Input = ();
+    fn autogenerate_ids() -> bool {
+        true
+    }
+
+    fn input_to_id(_id_in: Self::Input) -> I {
+        unreachable!("It should not be called")
     }
 }
 
-/// A factory that can build [`IdentityHandler`]s for all resource
+/// A factory that can build [`IdentityManager`]s for all resource
 /// types.
 pub trait GlobalIdentityHandlerFactory:
     IdentityHandlerFactory<id::AdapterId>
@@ -162,27 +170,23 @@ pub trait GlobalIdentityHandlerFactory:
     + IdentityHandlerFactory<id::SamplerId>
     + IdentityHandlerFactory<id::SurfaceId>
 {
-    fn ids_are_generated_in_wgpu() -> bool;
 }
 
-impl GlobalIdentityHandlerFactory for IdentityManagerFactory {
-    fn ids_are_generated_in_wgpu() -> bool {
-        true
-    }
-}
+impl GlobalIdentityHandlerFactory for IdentityManagerFactory {}
 
-pub type Input<G, I> = <<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input;
+pub type Input<G, I> = <G as IdentityHandlerFactory<I>>::Input;
 
 #[test]
 fn test_epoch_end_of_life() {
     use id::TypedId as _;
-    let mut man = IdentityManager::default();
-    man.epochs.push(id::EPOCH_MASK);
-    man.free.push(0);
-    let id1 = man.alloc::<id::BufferId>(Backend::Empty);
-    assert_eq!(id1.unzip().0, 0);
+    let man = IdentityManager::<id::BufferId>::new();
+    let forced_id = man.mark_as_used(id::BufferId::zip(0, 1, Backend::Empty));
+    assert_eq!(forced_id.unzip().0, 0);
+    let id1 = man.process(Backend::Empty);
+    assert_eq!(id1.unzip().0, 1);
     man.free(id1);
-    let id2 = man.alloc::<id::BufferId>(Backend::Empty);
-    // confirm that the index 0 is no longer re-used
+    let id2 = man.process(Backend::Empty);
+    // confirm that epoch 1 is no longer re-used
     assert_eq!(id2.unzip().0, 1);
+    assert_eq!(id2.unzip().1, 2);
 }
diff --git a/third_party/rust/wgpu-core/src/init_tracker/buffer.rs b/third_party/rust/wgpu-core/src/init_tracker/buffer.rs
index ea9b9f6a8dcec..2c0fa8d372a36 100644
--- a/third_party/rust/wgpu-core/src/init_tracker/buffer.rs
+++ b/third_party/rust/wgpu-core/src/init_tracker/buffer.rs
@@ -1,10 +1,10 @@
 use super::{InitTracker, MemoryInitKind};
-use crate::id::BufferId;
-use std::ops::Range;
+use crate::{hal_api::HalApi, resource::Buffer};
+use std::{ops::Range, sync::Arc};
 
 #[derive(Debug, Clone)]
-pub(crate) struct BufferInitTrackerAction {
-    pub id: BufferId,
+pub(crate) struct BufferInitTrackerAction<A: HalApi> {
+    pub buffer: Arc<Buffer<A>>,
     pub range: Range<wgt::BufferAddress>,
     pub kind: MemoryInitKind,
 }
@@ -14,22 +14,26 @@ pub(crate) type BufferInitTracker = InitTracker<wgt::BufferAddress>;
 
 impl BufferInitTracker {
     /// Checks if an action has/requires any effect on the initialization status
     /// and shrinks its range if possible.
-    pub(crate) fn check_action(
+    pub(crate) fn check_action<A: HalApi>(
         &self,
-        action: &BufferInitTrackerAction,
-    ) -> Option<BufferInitTrackerAction> {
-        self.create_action(action.id, action.range.clone(), action.kind)
+        action: &BufferInitTrackerAction<A>,
+    ) -> Option<BufferInitTrackerAction<A>> {
+        self.create_action(&action.buffer, action.range.clone(), action.kind)
     }
 
     /// Creates an action if it would have any effect on the initialization
     /// status and shrinks the range if possible.
- pub(crate) fn create_action( + pub(crate) fn create_action( &self, - id: BufferId, + buffer: &Arc>, query_range: Range, kind: MemoryInitKind, - ) -> Option { + ) -> Option> { self.check(query_range) - .map(|range| BufferInitTrackerAction { id, range, kind }) + .map(|range| BufferInitTrackerAction { + buffer: buffer.clone(), + range, + kind, + }) } } diff --git a/third_party/rust/wgpu-core/src/init_tracker/texture.rs b/third_party/rust/wgpu-core/src/init_tracker/texture.rs index 17368e1014fea..a859b5f78409a 100644 --- a/third_party/rust/wgpu-core/src/init_tracker/texture.rs +++ b/third_party/rust/wgpu-core/src/init_tracker/texture.rs @@ -1,7 +1,7 @@ use super::{InitTracker, MemoryInitKind}; -use crate::{id::TextureId, track::TextureSelector}; +use crate::{hal_api::HalApi, resource::Texture, track::TextureSelector}; use arrayvec::ArrayVec; -use std::ops::Range; +use std::{ops::Range, sync::Arc}; #[derive(Debug, Clone)] pub(crate) struct TextureInitRange { @@ -35,8 +35,8 @@ impl From for TextureInitRange { } #[derive(Debug, Clone)] -pub(crate) struct TextureInitTrackerAction { - pub(crate) id: TextureId, +pub(crate) struct TextureInitTrackerAction { + pub(crate) texture: Arc>, pub(crate) range: TextureInitRange, pub(crate) kind: MemoryInitKind, } @@ -57,10 +57,10 @@ impl TextureInitTracker { } } - pub(crate) fn check_action( + pub(crate) fn check_action( &self, - action: &TextureInitTrackerAction, - ) -> Option { + action: &TextureInitTrackerAction, + ) -> Option> { let mut mip_range_start = std::usize::MAX; let mut mip_range_end = std::usize::MIN; let mut layer_range_start = std::u32::MAX; @@ -85,7 +85,7 @@ impl TextureInitTracker { if mip_range_start < mip_range_end && layer_range_start < layer_range_end { Some(TextureInitTrackerAction { - id: action.id, + texture: action.texture.clone(), range: TextureInitRange { mip_range: mip_range_start as u32..mip_range_end as u32, layer_range: layer_range_start..layer_range_end, diff --git a/third_party/rust/wgpu-core/src/instance.rs b/third_party/rust/wgpu-core/src/instance.rs index e0c406c4732be..37428aeae9738 100644 --- a/third_party/rust/wgpu-core/src/instance.rs +++ b/third_party/rust/wgpu-core/src/instance.rs @@ -1,25 +1,29 @@ +use std::sync::Arc; + use crate::{ - device::{Device, DeviceDescriptor}, + any_surface::AnySurface, + device::{queue::Queue, resource::Device, DeviceDescriptor}, global::Global, hal_api::HalApi, - hub::Token, - id::{AdapterId, DeviceId, SurfaceId, Valid}, + id::{AdapterId, DeviceId, QueueId, SurfaceId}, identity::{GlobalIdentityHandlerFactory, Input}, present::Presentation, - LabelHelpers, LifeGuard, Stored, DOWNLEVEL_WARNING_MESSAGE, + resource::{Resource, ResourceInfo, ResourceType}, + LabelHelpers, DOWNLEVEL_WARNING_MESSAGE, }; +use parking_lot::Mutex; use wgt::{Backend, Backends, PowerPreference}; -use hal::{Adapter as _, Instance as _}; +use hal::{Adapter as _, Instance as _, OpenDevice}; use thiserror::Error; pub type RequestAdapterOptions = wgt::RequestAdapterOptions; type HalInstance = ::Instance; //TODO: remove this -pub struct HalSurface { - pub raw: A::Surface, - //pub acquired_texture: Option, +#[derive(Clone)] +pub struct HalSurface { + pub raw: Arc, } #[derive(Clone, Debug, Error)] @@ -117,53 +121,56 @@ impl Instance { } pub(crate) fn destroy_surface(&self, surface: Surface) { - fn destroy( - _: A, - instance: &Option, - surface: Option>, - ) { + fn destroy(_: A, instance: &Option, surface: AnySurface) { unsafe { - if let Some(suf) = surface { - instance.as_ref().unwrap().destroy_surface(suf.raw); + if let 
Some(surface) = surface.take::() { + if let Ok(suf) = Arc::try_unwrap(surface) { + if let Ok(raw) = Arc::try_unwrap(suf.raw) { + instance.as_ref().unwrap().destroy_surface(raw); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } + } else { + panic!("Surface cannot be destroyed because is still in use"); + } } } } - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - destroy(hal::api::Vulkan, &self.vulkan, surface.vulkan); - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - destroy(hal::api::Metal, &self.metal, surface.metal); - #[cfg(all(feature = "dx12", windows))] - destroy(hal::api::Dx12, &self.dx12, surface.dx12); - #[cfg(all(feature = "dx11", windows))] - destroy(hal::api::Dx11, &self.dx11, surface.dx11); - #[cfg(feature = "gles")] - destroy(hal::api::Gles, &self.gl, surface.gl); + match surface.raw.backend() { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => destroy(hal::api::Vulkan, &self.vulkan, surface.raw), + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + Backend::Metal => destroy(hal::api::Metal, &self.metal, surface.raw), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => destroy(hal::api::Dx12, &self.dx12, surface.raw), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => destroy(hal::api::Dx11, &self.dx11, surface.raw), + #[cfg(feature = "gles")] + Backend::Gl => destroy(hal::api::Gles, &self.gl, surface.raw), + _ => unreachable!(), + } } } pub struct Surface { - pub(crate) presentation: Option, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - pub vulkan: Option>, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - pub metal: Option>, - #[cfg(all(feature = "dx12", windows))] - pub dx12: Option>, - #[cfg(all(feature = "dx11", windows))] - pub dx11: Option>, - #[cfg(feature = "gles")] - pub gl: Option>, + pub(crate) presentation: Mutex>, + pub(crate) info: ResourceInfo, + pub(crate) raw: AnySurface, } -impl crate::resource::Resource for Surface { - const TYPE: &'static str = "Surface"; +impl Resource for Surface { + const TYPE: ResourceType = "Surface"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } - fn label(&self) -> &str { - "" + fn label(&self) -> String { + String::from("") } } @@ -186,9 +193,9 @@ impl Surface { } } -pub struct Adapter { +pub struct Adapter { pub(crate) raw: hal::ExposedAdapter, - life_guard: LifeGuard, + pub(crate) info: ResourceInfo, } impl Adapter { @@ -207,7 +214,7 @@ impl Adapter { Self { raw, - life_guard: LifeGuard::new(""), + info: ResourceInfo::new(""), } } @@ -292,39 +299,42 @@ impl Adapter { } } - fn create_device_from_hal( - &self, - self_id: AdapterId, - open: hal::OpenDevice, + fn create_device_and_queue_from_hal( + self: &Arc, + hal_device: OpenDevice, desc: &DeviceDescriptor, instance_flags: wgt::InstanceFlags, trace_path: Option<&std::path::Path>, - ) -> Result, RequestDeviceError> { - log::trace!("Adapter::create_device"); + ) -> Result<(Device, Queue), RequestDeviceError> { + log::info!("Adapter::create_device"); let caps = &self.raw.capabilities; - Device::new( - open, - Stored { - value: Valid(self_id), - ref_count: self.life_guard.add_ref(), - }, + if let Ok(device) = Device::new( + hal_device.device, + &hal_device.queue, + self, caps.alignments.clone(), caps.downlevel.clone(), desc, trace_path, 
instance_flags, - ) - .or(Err(RequestDeviceError::OutOfMemory)) + ) { + let queue = Queue { + device: None, + raw: Some(hal_device.queue), + info: ResourceInfo::new(""), + }; + return Ok((device, queue)); + } + Err(RequestDeviceError::OutOfMemory) } - fn create_device( - &self, - self_id: AdapterId, + fn create_device_and_queue( + self: &Arc, desc: &DeviceDescriptor, instance_flags: wgt::InstanceFlags, trace_path: Option<&std::path::Path>, - ) -> Result, RequestDeviceError> { + ) -> Result<(Device, Queue), RequestDeviceError> { // Verify all features were exposed by the adapter if !self.raw.features.contains(desc.features) { return Err(RequestDeviceError::UnsupportedFeature( @@ -342,7 +352,7 @@ impl Adapter { missing_flags, DOWNLEVEL_WARNING_MESSAGE ); - log::info!("{:#?}", caps.downlevel); + log::warn!("{:#?}", caps.downlevel); } // Verify feature preconditions @@ -373,15 +383,19 @@ impl Adapter { }, )?; - self.create_device_from_hal(self_id, open, desc, instance_flags, trace_path) + self.create_device_and_queue_from_hal(open, desc, instance_flags, trace_path) } } -impl crate::resource::Resource for Adapter { - const TYPE: &'static str = "Adapter"; +impl Resource for Adapter { + const TYPE: ResourceType = "Adapter"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } @@ -419,7 +433,7 @@ pub enum RequestDeviceError { LimitsExceeded(#[from] FailedLimit), #[error("Device has no queue supporting graphics")] NoGraphicsQueue, - #[error("Not enough memory left")] + #[error("Not enough memory left to request device")] OutOfMemory, #[error("Unsupported features were requested: {0:?}")] UnsupportedFeature(wgt::Features), @@ -430,10 +444,10 @@ pub enum AdapterInputs<'a, I> { Mask(Backends, fn(Backend) -> I), } -impl AdapterInputs<'_, I> { +impl AdapterInputs<'_, I> { fn find(&self, b: Backend) -> Option { match *self { - Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).cloned(), + Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).copied(), Self::Mask(bits, ref fun) => { if bits.contains(b.into()) { Some(fun(b)) @@ -473,42 +487,72 @@ impl Global { ) -> SurfaceId { profiling::scope!("Instance::create_surface"); - fn init( + fn init( + any_surface: &mut Option, inst: &Option, display_handle: raw_window_handle::RawDisplayHandle, window_handle: raw_window_handle::RawWindowHandle, - ) -> Option> { - inst.as_ref().and_then(|inst| unsafe { - match inst.create_surface(display_handle, window_handle) { - Ok(raw) => Some(HalSurface { - raw, - //acquired_texture: None, - }), - Err(e) => { - log::warn!("Error: {:?}", e); - None + ) { + if any_surface.is_none() { + if let Some(surface) = inst.as_ref().and_then(|inst| unsafe { + match inst.create_surface(display_handle, window_handle) { + Ok(raw) => Some(HalSurface:: { raw: Arc::new(raw) }), + Err(e) => { + log::warn!("Error: {:?}", e); + None + } } + }) { + *any_surface = Some(AnySurface::new(surface)); } - }) + } } + let mut hal_surface = None; + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + init::( + &mut hal_surface, + &self.instance.vulkan, + display_handle, + window_handle, + ); + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + init::( + &mut hal_surface, + &self.instance.metal, + display_handle, + window_handle, + ); + #[cfg(all(feature = "dx12", windows))] + init::( + &mut hal_surface, + &self.instance.dx12, + display_handle, + window_handle, + ); + 
#[cfg(all(feature = "dx11", windows))] + init::( + &mut hal_surface, + &self.instance.dx11, + display_handle, + window_handle, + ); + #[cfg(feature = "gles")] + init::( + &mut hal_surface, + &self.instance.gl, + display_handle, + window_handle, + ); + let surface = Surface { - presentation: None, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: init::(&self.instance.vulkan, display_handle, window_handle), - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - metal: init::(&self.instance.metal, display_handle, window_handle), - #[cfg(all(feature = "dx12", windows))] - dx12: init::(&self.instance.dx12, display_handle, window_handle), - #[cfg(all(feature = "dx11", windows))] - dx11: init::(&self.instance.dx11, display_handle, window_handle), - #[cfg(feature = "gles")] - gl: init::(&self.instance.gl, display_handle, window_handle), + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: hal_surface.unwrap(), }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } /// # Safety @@ -523,24 +567,27 @@ impl Global { profiling::scope!("Instance::create_surface_metal"); let surface = Surface { - presentation: None, - metal: self.instance.metal.as_ref().map(|inst| HalSurface { - raw: { - // we don't want to link to metal-rs for this - #[allow(clippy::transmute_ptr_to_ref)] - inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }) - }, - //acquired_texture: None, - }), - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - #[cfg(feature = "gles")] - gl: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .metal + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new( + // we don't want to link to metal-rs for this + #[allow(clippy::transmute_ptr_to_ref)] + inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) }), + ), //acquired_texture: None, + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } #[cfg(all( @@ -556,22 +603,27 @@ impl Global { profiling::scope!("Instance::create_surface_webgl_canvas"); let surface = Surface { - presentation: None, - gl: self - .instance - .gl - .as_ref() - .map(|inst| { - Ok(HalSurface { - raw: inst.create_surface_from_canvas(canvas)?, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .gl + .as_ref() + .map(|inst| { + let raw_surface = inst.create_surface_from_canvas(canvas)?; + Ok(HalSurface { + raw: Arc::new(raw_surface), + }) }) - }) - .transpose()?, + .transpose()? 
+ .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - Ok(id.0) + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + Ok(id) } #[cfg(all( @@ -587,22 +639,27 @@ impl Global { profiling::scope!("Instance::create_surface_webgl_offscreen_canvas"); let surface = Surface { - presentation: None, - gl: self - .instance - .gl - .as_ref() - .map(|inst| { - Ok(HalSurface { - raw: inst.create_surface_from_offscreen_canvas(canvas)?, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .gl + .as_ref() + .map(|inst| { + let raw_surface = inst.create_surface_from_offscreen_canvas(canvas)?; + Ok(HalSurface { + raw: Arc::new(raw_surface), + }) }) - }) - .transpose()?, + .transpose()? + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - Ok(id.0) + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + Ok(id) } #[cfg(all(feature = "dx12", windows))] @@ -617,22 +674,23 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_visual"); let surface = Surface { - presentation: None, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - #[cfg(all(feature = "dx12", windows))] - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe { inst.create_surface_from_visual(visual as _) }, - }), - #[cfg(all(feature = "dx11", windows))] - dx11: None, - #[cfg(feature = "gles")] - gl: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new(unsafe { inst.create_surface_from_visual(visual as _) }), + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } #[cfg(all(feature = "dx12", windows))] @@ -647,22 +705,25 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_surface_handle"); let surface = Surface { - presentation: None, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - #[cfg(all(feature = "dx12", windows))] - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe { inst.create_surface_from_surface_handle(surface_handle) }, - }), - #[cfg(all(feature = "dx11", windows))] - dx11: None, - #[cfg(feature = "gles")] - gl: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new(unsafe { + inst.create_surface_from_surface_handle(surface_handle) + }), + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } #[cfg(all(feature = "dx12", windows))] @@ -677,58 +738,64 @@ impl Global { profiling::scope!("Instance::instance_create_surface_from_swap_chain_panel"); let surface = Surface { - presentation: None, - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - vulkan: None, - #[cfg(all(feature = "dx12", windows))] - dx12: self.instance.dx12.as_ref().map(|inst| HalSurface { - raw: unsafe 
{ inst.create_surface_from_swap_chain_panel(swap_chain_panel as _) }, - }), - #[cfg(all(feature = "dx11", windows))] - dx11: None, - #[cfg(feature = "gles")] - gl: None, + presentation: Mutex::new(None), + info: ResourceInfo::new(""), + raw: { + let hal_surface: HalSurface = self + .instance + .dx12 + .as_ref() + .map(|inst| HalSurface { + raw: Arc::new(unsafe { + inst.create_surface_from_swap_chain_panel(swap_chain_panel as _) + }), + }) + .unwrap(); + AnySurface::new(hal_surface) + }, }; - let mut token = Token::root(); - let id = self.surfaces.prepare(id_in).assign(surface, &mut token); - id.0 + let (id, _) = self.surfaces.prepare::(id_in).assign(surface); + id } pub fn surface_drop(&self, id: SurfaceId) { profiling::scope!("Surface::drop"); - log::trace!("Surface::drop {id:?}"); - let mut token = Token::root(); - let (surface, _) = self.surfaces.unregister(id, &mut token); - let mut surface = surface.unwrap(); + log::info!("Surface::drop {id:?}"); fn unconfigure( global: &Global, - surface: &mut HalSurface, + surface: &AnySurface, present: &Presentation, ) { let hub = HalApi::hub(global); - hub.surface_unconfigure(present.device_id.value, surface); + if let Some(hal_surface) = surface.downcast_ref::() { + if let Some(device) = present.device.downcast_ref::() { + hub.surface_unconfigure(device, hal_surface); + } + } } - if let Some(present) = surface.presentation.take() { - match present.backend() { + let surface = self.surfaces.unregister(id); + if let Ok(surface) = Arc::try_unwrap(surface.unwrap()) { + if let Some(present) = surface.presentation.lock().take() { #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => unconfigure(self, surface.vulkan.as_mut().unwrap(), &present), + unconfigure::<_, hal::api::Vulkan>(self, &surface.raw, &present); #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => unconfigure(self, surface.metal.as_mut().unwrap(), &present), + unconfigure::<_, hal::api::Metal>(self, &surface.raw, &present); #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => unconfigure(self, surface.dx12.as_mut().unwrap(), &present), + unconfigure::<_, hal::api::Dx12>(self, &surface.raw, &present); #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => unconfigure(self, surface.dx11.as_mut().unwrap(), &present), + unconfigure::<_, hal::api::Dx11>(self, &surface.raw, &present); #[cfg(feature = "gles")] - Backend::Gl => unconfigure(self, surface.gl.as_mut().unwrap(), &present), - _ => unreachable!(), + unconfigure::<_, hal::api::Gles>(self, &surface.raw, &present); } - } - self.instance.destroy_surface(surface); + self.instance.destroy_surface(surface); + } else { + panic!("Surface cannot be destroyed because is still in use"); + } } fn enumerate( @@ -749,17 +816,13 @@ impl Global { profiling::scope!("enumerating", &*format!("{:?}", A::VARIANT)); let hub = HalApi::hub(self); - let mut token = Token::root(); let hal_adapters = unsafe { inst.enumerate_adapters() }; for raw in hal_adapters { let adapter = Adapter::new(raw); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); - let id = hub - .adapters - .prepare(id_backend.clone()) - .assign(adapter, &mut token); - list.push(id.0); + let (id, _) = hub.adapters.prepare::(id_backend).assign(adapter); + list.push(id); } } @@ -805,14 +868,13 @@ impl Global { None } None => { - let mut token = Token::root(); let adapter = Adapter::new(list.swap_remove(*selected)); log::info!("Adapter {:?} {:?}", A::VARIANT, adapter.raw.info); - let id = HalApi::hub(self) + 
let (id, _) = HalApi::hub(self) .adapters - .prepare(new_id.unwrap()) - .assign(adapter, &mut token); - Some(id.0) + .prepare::(new_id.unwrap()) + .assign(adapter); + Some(id) } } } @@ -825,7 +887,7 @@ impl Global { profiling::scope!("Instance::pick_adapter"); log::trace!("Instance::pick_adapter"); - fn gather( + fn gather( _: A, instance: Option<&A::Instance>, inputs: &AdapterInputs, @@ -859,16 +921,15 @@ impl Global { } } - let mut token = Token::root(); - let (surface_guard, _) = self.surfaces.read(&mut token); let compatible_surface = desc .compatible_surface .map(|id| { - surface_guard + self.surfaces .get(id) .map_err(|_| RequestAdapterError::InvalidSurface(id)) }) .transpose()?; + let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); let mut device_types = Vec::new(); #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] @@ -917,9 +978,6 @@ impl Global { &mut device_types, ); - // need to free the token to be used by `select` - drop(surface_guard); - drop(token); if device_types.is_empty() { return Err(RequestAdapterError::NotFound); } @@ -1007,22 +1065,24 @@ impl Global { ) -> AdapterId { profiling::scope!("Instance::create_adapter_from_hal"); - let mut token = Token::root(); - let fid = A::hub(self).adapters.prepare(input); + let fid = A::hub(self).adapters.prepare::(input); - match A::VARIANT { - #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] - Backend::Vulkan => fid.assign(Adapter::new(hal_adapter), &mut token).0, - #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] - Backend::Metal => fid.assign(Adapter::new(hal_adapter), &mut token).0, - #[cfg(all(feature = "dx12", windows))] - Backend::Dx12 => fid.assign(Adapter::new(hal_adapter), &mut token).0, - #[cfg(all(feature = "dx11", windows))] - Backend::Dx11 => fid.assign(Adapter::new(hal_adapter), &mut token).0, - #[cfg(feature = "gles")] - Backend::Gl => fid.assign(Adapter::new(hal_adapter), &mut token).0, - _ => unreachable!(), - } + let (id, _adapter): (crate::id::Id>, Arc>) = + match A::VARIANT { + #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))] + Backend::Vulkan => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))] + Backend::Metal => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "dx12", windows))] + Backend::Dx12 => fid.assign(Adapter::new(hal_adapter)), + #[cfg(all(feature = "dx11", windows))] + Backend::Dx11 => fid.assign(Adapter::new(hal_adapter)), + #[cfg(feature = "gles")] + Backend::Gl => fid.assign(Adapter::new(hal_adapter)), + _ => unreachable!(), + }; + log::info!("Created Adapter {:?}", id); + id } pub fn adapter_get_info( @@ -1030,9 +1090,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.info.clone()) .map_err(|_| InvalidAdapter) @@ -1044,9 +1103,8 @@ impl Global { format: wgt::TextureFormat, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.get_texture_format_features(format)) .map_err(|_| InvalidAdapter) @@ -1057,9 +1115,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - 
adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.features) .map_err(|_| InvalidAdapter) @@ -1070,9 +1127,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.capabilities.limits.clone()) .map_err(|_| InvalidAdapter) @@ -1083,9 +1139,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - adapter_guard + + hub.adapters .get(adapter_id) .map(|adapter| adapter.raw.capabilities.downlevel.clone()) .map_err(|_| InvalidAdapter) @@ -1096,9 +1151,8 @@ impl Global { adapter_id: AdapterId, ) -> Result { let hub = A::hub(self); - let mut token = Token::root(); - let (adapter_guard, _) = hub.adapters.read(&mut token); - let adapter = adapter_guard.get(adapter_id).map_err(|_| InvalidAdapter)?; + + let adapter = hub.adapters.get(adapter_id).map_err(|_| InvalidAdapter)?; Ok(unsafe { adapter.raw.adapter.get_presentation_timestamp() }) } @@ -1108,16 +1162,15 @@ impl Global { log::trace!("Adapter::drop {adapter_id:?}"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut adapter_guard, _) = hub.adapters.write(&mut token); + let mut adapters_locked = hub.adapters.write(); - let free = match adapter_guard.get_mut(adapter_id) { - Ok(adapter) => adapter.life_guard.ref_count.take().unwrap().load() == 1, + let free = match adapters_locked.get(adapter_id) { + Ok(adapter) => Arc::strong_count(adapter) == 1, Err(_) => true, }; if free { hub.adapters - .unregister_locked(adapter_id, &mut *adapter_guard); + .unregister_locked(adapter_id, &mut *adapters_locked); } } } @@ -1128,32 +1181,43 @@ impl Global { adapter_id: AdapterId, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - id_in: Input, - ) -> (DeviceId, Option) { + device_id_in: Input, + queue_id_in: Input, + ) -> (DeviceId, QueueId, Option) { profiling::scope!("Adapter::request_device"); log::trace!("Adapter::request_device"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.devices.prepare(id_in); + let device_fid = hub.devices.prepare::(device_id_in); + let queue_fid = hub.queues.prepare::(queue_id_in); let error = loop { - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let adapter = match adapter_guard.get(adapter_id) { + let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = - match adapter.create_device(adapter_id, desc, self.instance.flags, trace_path) { - Ok(device) => device, + let (device, mut queue) = + match adapter.create_device_and_queue(desc, self.instance.flags, trace_path) { + Ok((device, queue)) => (device, queue), Err(e) => break e, }; - let id = fid.assign(device, &mut token); - return (id.0, None); + let (device_id, _) = device_fid.assign(device); + log::info!("Created Device {:?}", device_id); + + let device = hub.devices.get(device_id).unwrap(); + queue.device = Some(device.clone()); + + let (queue_id, _) = queue_fid.assign(queue); + log::info!("Created Queue {:?}", queue_id); + + device.queue_id.write().replace(queue_id); + + return (device_id, queue_id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) + let device_id = device_fid.assign_error(desc.label.borrow_or_default()); + let queue_id = 
queue_fid.assign_error(desc.label.borrow_or_default()); + (device_id, queue_id, Some(error)) } /// # Safety @@ -1163,25 +1227,24 @@ impl Global { pub unsafe fn create_device_from_hal( &self, adapter_id: AdapterId, - hal_device: hal::OpenDevice, + hal_device: OpenDevice, desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, - id_in: Input, - ) -> (DeviceId, Option) { - profiling::scope!("Adapter::create_device_from_hal"); + device_id_in: Input, + queue_id_in: Input, + ) -> (DeviceId, QueueId, Option) { + profiling::scope!("Global::create_device_from_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.devices.prepare(id_in); + let devices_fid = hub.devices.prepare::(device_id_in); + let queues_fid = hub.queues.prepare::(queue_id_in); let error = loop { - let (adapter_guard, mut token) = hub.adapters.read(&mut token); - let adapter = match adapter_guard.get(adapter_id) { + let adapter = match hub.adapters.get(adapter_id) { Ok(adapter) => adapter, Err(_) => break RequestDeviceError::InvalidAdapter, }; - let device = match adapter.create_device_from_hal( - adapter_id, + let (device, mut queue) = match adapter.create_device_and_queue_from_hal( hal_device, desc, self.instance.flags, @@ -1190,12 +1253,23 @@ impl Global { Ok(device) => device, Err(e) => break e, }; - let id = fid.assign(device, &mut token); - return (id.0, None); + let (device_id, _) = devices_fid.assign(device); + log::info!("Created Device {:?}", device_id); + + let device = hub.devices.get(device_id).unwrap(); + queue.device = Some(device.clone()); + + let (queue_id, _) = queues_fid.assign(queue); + log::info!("Created Queue {:?}", queue_id); + + device.queue_id.write().replace(queue_id); + + return (device_id, queue_id, None); }; - let id = fid.assign_error(desc.label.borrow_or_default(), &mut token); - (id, Some(error)) + let device_id = devices_fid.assign_error(desc.label.borrow_or_default()); + let queue_id = queues_fid.assign_error(desc.label.borrow_or_default()); + (device_id, queue_id, Some(error)) } } diff --git a/third_party/rust/wgpu-core/src/lib.rs b/third_party/rust/wgpu-core/src/lib.rs index ad145aeb2bea1..6a3377a13212c 100644 --- a/third_party/rust/wgpu-core/src/lib.rs +++ b/third_party/rust/wgpu-core/src/lib.rs @@ -48,6 +48,7 @@ clippy::pattern_type_mismatch, )] +pub mod any_surface; pub mod binding_model; pub mod command; mod conv; @@ -70,9 +71,7 @@ mod validation; pub use hal::{api, MAX_BIND_GROUPS, MAX_COLOR_ATTACHMENTS, MAX_VERTEX_BUFFERS}; -use atomic::{AtomicUsize, Ordering}; - -use std::{borrow::Cow, os::raw::c_char, ptr, sync::atomic}; +use std::{borrow::Cow, os::raw::c_char}; /// The index of a queue submission. /// @@ -114,155 +113,6 @@ pub fn hal_label(opt: Option<&str>, flags: wgt::InstanceFlags) -> Option<&str> { opt } -/// Reference count object that is 1:1 with each reference. -/// -/// All the clones of a given `RefCount` point to the same -/// heap-allocated atomic reference count. When the count drops to -/// zero, only the count is freed. No other automatic cleanup takes -/// place; this is just a reference count, not a smart pointer. -/// -/// `RefCount` values are created only by [`LifeGuard::new`] and by -/// `Clone`, so every `RefCount` is implicitly tied to some -/// [`LifeGuard`]. -#[derive(Debug)] -struct RefCount(ptr::NonNull); - -unsafe impl Send for RefCount {} -unsafe impl Sync for RefCount {} - -impl RefCount { - const MAX: usize = 1 << 24; - - /// Construct a new `RefCount`, with an initial count of 1. 
- fn new() -> RefCount { - let bx = Box::new(AtomicUsize::new(1)); - Self(unsafe { ptr::NonNull::new_unchecked(Box::into_raw(bx)) }) - } - - fn load(&self) -> usize { - unsafe { self.0.as_ref() }.load(Ordering::Acquire) - } -} - -impl Clone for RefCount { - fn clone(&self) -> Self { - let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel); - assert!(old_size < Self::MAX); - Self(self.0) - } -} - -impl Drop for RefCount { - fn drop(&mut self) { - unsafe { - if self.0.as_ref().fetch_sub(1, Ordering::AcqRel) == 1 { - drop(Box::from_raw(self.0.as_ptr())); - } - } - } -} - -/// Reference count object that tracks multiple references. -/// Unlike `RefCount`, it's manually inc()/dec() called. -#[derive(Debug)] -struct MultiRefCount(AtomicUsize); - -impl MultiRefCount { - fn new() -> Self { - Self(AtomicUsize::new(1)) - } - - fn inc(&self) { - self.0.fetch_add(1, Ordering::AcqRel); - } - - fn dec_and_check_empty(&self) -> bool { - self.0.fetch_sub(1, Ordering::AcqRel) == 1 - } -} - -/// Information needed to decide when it's safe to free some wgpu-core -/// resource. -/// -/// Each type representing a `wgpu-core` resource, like [`Device`], -/// [`Buffer`], etc., contains a `LifeGuard` which indicates whether -/// it is safe to free. -/// -/// A resource may need to be retained for any of several reasons: -/// -/// - The user may hold a reference to it (via a `wgpu::Buffer`, say). -/// -/// - Other resources may depend on it (a texture view's backing -/// texture, for example). -/// -/// - It may be used by commands sent to the GPU that have not yet -/// finished execution. -/// -/// [`Device`]: device::Device -/// [`Buffer`]: resource::Buffer -#[derive(Debug)] -pub struct LifeGuard { - /// `RefCount` for the user's reference to this resource. - /// - /// When the user first creates a `wgpu-core` resource, this `RefCount` is - /// created along with the resource's `LifeGuard`. When the user drops the - /// resource, we swap this out for `None`. Note that the resource may - /// still be held alive by other resources. - /// - /// Any `Stored` value holds a clone of this `RefCount` along with the id - /// of a `T` resource. - ref_count: Option, - - /// The index of the last queue submission in which the resource - /// was used. - /// - /// Each queue submission is fenced and assigned an index number - /// sequentially. Thus, when a queue submission completes, we know any - /// resources used in that submission and any lower-numbered submissions are - /// no longer in use by the GPU. - submission_index: AtomicUsize, - - /// The `label` from the descriptor used to create the resource. - #[cfg(debug_assertions)] - pub(crate) label: String, -} - -impl LifeGuard { - #[allow(unused_variables)] - fn new(label: &str) -> Self { - Self { - ref_count: Some(RefCount::new()), - submission_index: AtomicUsize::new(0), - #[cfg(debug_assertions)] - label: label.to_string(), - } - } - - fn add_ref(&self) -> RefCount { - self.ref_count.clone().unwrap() - } - - /// Record that this resource will be used by the queue submission with the - /// given index. - /// - /// Returns `true` if the resource is still held by the user. 
- fn use_at(&self, submit_index: SubmissionIndex) -> bool { - self.submission_index - .store(submit_index as _, Ordering::Release); - self.ref_count.is_some() - } - - fn life_count(&self) -> SubmissionIndex { - self.submission_index.load(Ordering::Acquire) as _ - } -} - -#[derive(Clone, Debug)] -struct Stored { - value: id::Valid, - ref_count: RefCount, -} - const DOWNLEVEL_WARNING_MESSAGE: &str = "The underlying API or device in use does not \ support enough features to be a fully compliant implementation of WebGPU. A subset of the features can still be used. \ If you are running this program on native and not in a browser and wish to limit the features you use to the supported subset, \ @@ -399,7 +249,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// /// ```ignore /// impl<...> Global<...> { -/// pub fn device_create_buffer(&self, ...) -> ... +/// pub fn device_create_buffer(&self, ...) -> ... /// { ... } /// } /// ``` @@ -407,7 +257,7 @@ define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "g /// That `gfx_select!` call uses `device_id`'s backend to select the right /// backend type `A` for a call to `Global::device_create_buffer`. /// -/// However, there's nothing about this macro that is specific to `global::Global`. +/// However, there's nothing about this macro that is specific to `hub::Global`. /// For example, Firefox's embedding of `wgpu_core` defines its own types with /// methods that take `hal::Api` type parameters. Firefox uses `gfx_select!` to /// dynamically dispatch to the right specialization based on the resource's id. diff --git a/third_party/rust/wgpu-core/src/pipeline.rs b/third_party/rust/wgpu-core/src/pipeline.rs index bfab15b2f5e30..e02cb45786a70 100644 --- a/third_party/rust/wgpu-core/src/pipeline.rs +++ b/third_party/rust/wgpu-core/src/pipeline.rs @@ -1,13 +1,16 @@ +#[cfg(feature = "trace")] +use crate::device::trace; use crate::{ - binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError}, + binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError, PipelineLayout}, command::ColorAttachmentError, - device::{DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, - id::{DeviceId, PipelineLayoutId, ShaderModuleId}, - resource::Resource, - validation, Label, LifeGuard, Stored, + device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, + hal_api::HalApi, + id::{ComputePipelineId, PipelineLayoutId, RenderPipelineId, ShaderModuleId}, + resource::{Resource, ResourceInfo, ResourceType}, + validation, Label, }; use arrayvec::ArrayVec; -use std::{borrow::Cow, error::Error, fmt, marker::PhantomData, num::NonZeroU32}; +use std::{borrow::Cow, error::Error, fmt, marker::PhantomData, num::NonZeroU32, sync::Arc}; use thiserror::Error; /// Information about buffer bindings, which @@ -40,26 +43,53 @@ pub struct ShaderModuleDescriptor<'a> { } #[derive(Debug)] -pub struct ShaderModule { - pub(crate) raw: A::ShaderModule, - pub(crate) device_id: Stored, +pub struct ShaderModule { + pub(crate) raw: Option, + pub(crate) device: Arc>, pub(crate) interface: Option, + pub(crate) info: ResourceInfo, #[cfg(debug_assertions)] pub(crate) label: String, } -impl Resource for ShaderModule { - const TYPE: &'static str = "ShaderModule"; +impl Drop for ShaderModule { + fn drop(&mut self) { + log::info!("Destroying ShaderModule {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + #[cfg(feature = "trace")] + if let Some(ref mut trace) = 
*self.device.trace.lock() { + trace.add(trace::Action::DestroyShaderModule(self.info.id())); + } + unsafe { + use hal::Device; + self.device.raw().destroy_shader_module(raw); + } + } + } +} + +impl Resource for ShaderModule { + const TYPE: ResourceType = "ShaderModule"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn as_info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + + fn label(&self) -> String { #[cfg(debug_assertions)] - return &self.label; + return self.label.clone(); #[cfg(not(debug_assertions))] - return ""; + return String::new(); + } +} + +impl ShaderModule { + pub(crate) fn raw(&self) -> &A::ShaderModule { + self.raw.as_ref().unwrap() } } @@ -212,19 +242,42 @@ pub enum CreateComputePipelineError { } #[derive(Debug)] -pub struct ComputePipeline { - pub(crate) raw: A::ComputePipeline, - pub(crate) layout_id: Stored, - pub(crate) device_id: Stored, +pub struct ComputePipeline { + pub(crate) raw: Option, + pub(crate) layout: Arc>, + pub(crate) device: Arc>, + pub(crate) _shader_module: Arc>, pub(crate) late_sized_buffer_groups: ArrayVec, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, } -impl Resource for ComputePipeline { - const TYPE: &'static str = "ComputePipeline"; +impl Drop for ComputePipeline { + fn drop(&mut self) { + log::info!("Destroying ComputePipeline {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_compute_pipeline(raw); + } + } + } +} + +impl Resource for ComputePipeline { + const TYPE: ResourceType = "ComputePipeline"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } +} + +impl ComputePipeline { + pub(crate) fn raw(&self) -> &A::ComputePipeline { + self.raw.as_ref().unwrap() } } @@ -426,22 +479,45 @@ impl Default for VertexStep { } #[derive(Debug)] -pub struct RenderPipeline { - pub(crate) raw: A::RenderPipeline, - pub(crate) layout_id: Stored, - pub(crate) device_id: Stored, +pub struct RenderPipeline { + pub(crate) raw: Option, + pub(crate) device: Arc>, + pub(crate) layout: Arc>, + pub(crate) _shader_modules: Vec>>, pub(crate) pass_context: RenderPassContext, pub(crate) flags: PipelineFlags, pub(crate) strip_index_format: Option, pub(crate) vertex_steps: Vec, pub(crate) late_sized_buffer_groups: ArrayVec, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, } -impl Resource for RenderPipeline { - const TYPE: &'static str = "RenderPipeline"; +impl Drop for RenderPipeline { + fn drop(&mut self) { + log::info!("Destroying RenderPipeline {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_render_pipeline(raw); + } + } + } +} + +impl Resource for RenderPipeline { + const TYPE: ResourceType = "RenderPipeline"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } +} - fn life_guard(&self) -> &LifeGuard { - &self.life_guard +impl RenderPipeline { + pub(crate) fn raw(&self) -> &A::RenderPipeline { + self.raw.as_ref().unwrap() } } diff --git a/third_party/rust/wgpu-core/src/present.rs b/third_party/rust/wgpu-core/src/present.rs index 52ac55bcf122e..7d0b980574f58 100644 --- a/third_party/rust/wgpu-core/src/present.rs +++ b/third_party/rust/wgpu-core/src/present.rs @@ 
-9,24 +9,29 @@ When this texture is presented, we remove it from the device tracker as well as extract it from the hub. !*/ -use std::borrow::Borrow; +use std::{ + borrow::Borrow, + sync::atomic::{AtomicBool, Ordering}, +}; #[cfg(feature = "trace")] use crate::device::trace::Action; use crate::{ conv, + device::any_device::AnyDevice, device::{DeviceError, MissingDownlevelFlags, WaitIdleError}, global::Global, hal_api::HalApi, hal_label, - hub::Token, - id::{DeviceId, SurfaceId, TextureId, Valid}, + id::{SurfaceId, TextureId}, identity::{GlobalIdentityHandlerFactory, Input}, init_tracker::TextureInitTracker, - resource, track, LifeGuard, Stored, + resource::{self, ResourceInfo}, + track, }; use hal::{Queue as _, Surface as _}; +use parking_lot::RwLock; use thiserror::Error; use wgt::SurfaceStatus as Status; @@ -35,17 +40,11 @@ pub const DESIRED_NUM_FRAMES: u32 = 3; #[derive(Debug)] pub(crate) struct Presentation { - pub(crate) device_id: Stored, + pub(crate) device: AnyDevice, pub(crate) config: wgt::SurfaceConfiguration>, #[allow(unused)] pub(crate) num_frames: u32, - pub(crate) acquired_texture: Option>, -} - -impl Presentation { - pub(crate) fn backend(&self) -> wgt::Backend { - crate::id::TypedId::unzip(self.device_id.value.0).2 - } + pub(crate) acquired_texture: Option, } #[derive(Clone, Debug, Error)] @@ -127,29 +126,31 @@ impl Global { profiling::scope!("SwapChain::get_next_texture"); let hub = A::hub(self); - let mut token = Token::root(); - let fid = hub.textures.prepare(texture_id_in); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - .get_mut(surface_id) + let fid = hub.textures.prepare::(texture_id_in); + + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (device_guard, mut token) = hub.devices.read(&mut token); - let (device, config) = match surface.presentation { - Some(ref present) => { - let device = &device_guard[present.device_id.value]; - if !device.is_valid() { - return Err(DeviceError::Lost.into()); + let (device, config) = if let Some(ref present) = *surface.presentation.lock() { + match present.device.downcast_clone::() { + Some(device) => { + if !device.is_valid() { + return Err(DeviceError::Lost.into()); + } + (device, present.config.clone()) } - (device, present.config.clone()) + None => return Err(SurfaceError::NotConfigured), } - None => return Err(SurfaceError::NotConfigured), + } else { + return Err(SurfaceError::NotConfigured); }; #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::GetSurfaceTexture { + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::GetSurfaceTexture { id: fid.id(), parent_id: surface_id, }); @@ -157,7 +158,7 @@ impl Global { #[cfg(not(feature = "trace"))] let _ = device; - let suf = A::get_surface_mut(surface); + let suf = A::get_surface(surface.as_ref()); let (texture_id, status) = match unsafe { suf.unwrap() .raw @@ -166,6 +167,26 @@ impl Global { ))) } { Ok(Some(ast)) => { + let texture_desc = wgt::TextureDescriptor { + label: (), + size: wgt::Extent3d { + width: config.width, + height: config.height, + depth_or_array_layers: 1, + }, + sample_count: 1, + mip_level_count: 1, + format: config.format, + dimension: wgt::TextureDimension::D2, + usage: config.usage, + view_formats: config.view_formats, + }; + let hal_usage = conv::map_texture_usage(config.usage, config.format.into()); + let format_features = wgt::TextureFormatFeatures { + allowed_usages: 
wgt::TextureUsages::RENDER_ATTACHMENT, + flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 + | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, + }; let clear_view_desc = hal::TextureViewDescriptor { label: hal_label( Some("(wgpu internal) clear surface texture view"), @@ -176,85 +197,60 @@ impl Global { usage: hal::TextureUses::COLOR_TARGET, range: wgt::ImageSubresourceRange::default(), }; - let mut clear_views = smallvec::SmallVec::new(); - clear_views.push( - unsafe { - hal::Device::create_texture_view( - &device.raw, - ast.texture.borrow(), - &clear_view_desc, - ) - } - .map_err(DeviceError::from)?, - ); + let clear_view = unsafe { + hal::Device::create_texture_view( + device.raw(), + ast.texture.borrow(), + &clear_view_desc, + ) + } + .map_err(DeviceError::from)?; - let present = surface.presentation.as_mut().unwrap(); + let mut presentation = surface.presentation.lock(); + let present = presentation.as_mut().unwrap(); let texture = resource::Texture { - inner: resource::TextureInner::Surface { - raw: ast.texture, - parent_id: Valid(surface_id), - has_work: false, - }, - device_id: present.device_id.clone(), - desc: wgt::TextureDescriptor { - label: (), - size: wgt::Extent3d { - width: config.width, - height: config.height, - depth_or_array_layers: 1, - }, - sample_count: 1, - mip_level_count: 1, - format: config.format, - dimension: wgt::TextureDimension::D2, - usage: config.usage, - view_formats: config.view_formats, - }, - hal_usage: conv::map_texture_usage(config.usage, config.format.into()), - format_features: wgt::TextureFormatFeatures { - allowed_usages: wgt::TextureUsages::RENDER_ATTACHMENT, - flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4 - | wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE, - }, - initialization_status: TextureInitTracker::new(1, 1), + inner: RwLock::new(Some(resource::TextureInner::Surface { + raw: Some(ast.texture), + parent_id: surface_id, + has_work: AtomicBool::new(false), + })), + device: device.clone(), + desc: texture_desc, + hal_usage, + format_features, + initialization_status: RwLock::new(TextureInitTracker::new(1, 1)), full_range: track::TextureSelector { layers: 0..1, mips: 0..1, }, - life_guard: LifeGuard::new(""), - clear_mode: resource::TextureClearMode::RenderPass { - clear_views, - is_color: true, - }, + info: ResourceInfo::new(""), + clear_mode: RwLock::new(resource::TextureClearMode::Surface { + clear_view: Some(clear_view), + }), }; - let ref_count = texture.life_guard.add_ref(); - let id = fid.assign(texture, &mut token); + let (id, resource) = fid.assign(texture); + log::info!("Created CURRENT Surface Texture {:?}", id); { // register it in the device tracker as uninitialized let mut trackers = device.trackers.lock(); - trackers.textures.insert_single( - id.0, - ref_count.clone(), - hal::TextureUses::UNINITIALIZED, - ); + trackers + .textures + .insert_single(id, resource, hal::TextureUses::UNINITIALIZED); } if present.acquired_texture.is_some() { return Err(SurfaceError::AlreadyAcquired); } - present.acquired_texture = Some(Stored { - value: id, - ref_count, - }); + present.acquired_texture = Some(id); let status = if ast.suboptimal { Status::Suboptimal } else { Status::Good }; - (Some(id.0), status) + (Some(id), status) } Ok(None) => (None, Status::Timeout), Err(err) => ( @@ -283,27 +279,28 @@ impl Global { profiling::scope!("SwapChain::present"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - 
.get_mut(surface_id) + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (mut device_guard, mut token) = hub.devices.write(&mut token); - let present = match surface.presentation { - Some(ref mut present) => present, + let mut presentation = surface.presentation.lock(); + let present = match presentation.as_mut() { + Some(present) => present, None => return Err(SurfaceError::NotConfigured), }; - let device = &mut device_guard[present.device_id.value]; + let device = present.device.downcast_ref::().unwrap(); if !device.is_valid() { return Err(DeviceError::Lost.into()); } + let queue_id = device.queue_id.read().unwrap(); + let queue = hub.queues.get(queue_id).unwrap(); #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::Present(surface_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::Present(surface_id)); } let result = { @@ -316,33 +313,40 @@ impl Global { // and now we are moving it away. log::debug!( "Removing swapchain texture {:?} from the device tracker", - texture_id.value + texture_id ); - device.trackers.lock().textures.remove(texture_id.value); + device.trackers.lock().textures.remove(texture_id); - let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token); + let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { - texture.clear_mode.destroy_clear_views(&device.raw); + let suf = A::get_surface(&surface); + let mut inner = texture.inner_mut(); + let inner = inner.as_mut().unwrap(); - let suf = A::get_surface_mut(surface); - match texture.inner { + match *inner { resource::TextureInner::Surface { - raw, - parent_id, - has_work, + ref mut raw, + ref parent_id, + ref has_work, } => { - if surface_id != parent_id.0 { + if surface_id != *parent_id { log::error!("Presented frame is from a different surface"); Err(hal::SurfaceError::Lost) - } else if !has_work { + } else if !has_work.load(Ordering::Relaxed) { log::error!("No work has been submitted for this frame"); - unsafe { suf.unwrap().raw.discard_texture(raw) }; + unsafe { suf.unwrap().raw.discard_texture(raw.take().unwrap()) }; Err(hal::SurfaceError::Outdated) } else { - unsafe { device.queue.present(&mut suf.unwrap().raw, raw) } + unsafe { + queue + .raw + .as_ref() + .unwrap() + .present(&suf.unwrap().raw, raw.take().unwrap()) + } } } - resource::TextureInner::Native { .. } => unreachable!(), + _ => unreachable!(), } } else { Err(hal::SurfaceError::Outdated) //TODO? 
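The `surface_present` path above ends in a three-way decision about the acquired texture: a texture whose parent is a different surface is treated as lost, a texture with no submitted work is discarded back to the swapchain, and otherwise the texture is handed to the queue for presentation. A condensed sketch of that decision, using plain stand-ins rather than the wgpu-hal types:

```rust
// Condensed version of the decision `surface_present` makes for the
// acquired texture. Plain types stand in for the hal surface/texture;
// this is not the wgpu-hal API.
#[derive(Debug, PartialEq)]
enum PresentOutcome {
    Presented, // work was submitted: hand the frame to Queue::present
    Discarded, // no work touched the frame: give it back to the swapchain
    Lost,      // the frame belongs to a different surface
}

fn present_or_discard(parent_surface: u32, this_surface: u32, has_work: bool) -> PresentOutcome {
    if parent_surface != this_surface {
        PresentOutcome::Lost
    } else if !has_work {
        PresentOutcome::Discarded
    } else {
        PresentOutcome::Presented
    }
}

fn main() {
    assert_eq!(present_or_discard(1, 1, true), PresentOutcome::Presented);
    assert_eq!(present_or_discard(1, 1, false), PresentOutcome::Discarded);
    assert_eq!(present_or_discard(2, 1, true), PresentOutcome::Lost);
}
```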
@@ -372,27 +376,25 @@ impl Global { profiling::scope!("SwapChain::discard"); let hub = A::hub(self); - let mut token = Token::root(); - let (mut surface_guard, mut token) = self.surfaces.write(&mut token); - let surface = surface_guard - .get_mut(surface_id) + let surface = self + .surfaces + .get(surface_id) .map_err(|_| SurfaceError::Invalid)?; - let (mut device_guard, mut token) = hub.devices.write(&mut token); - - let present = match surface.presentation { - Some(ref mut present) => present, + let mut presentation = surface.presentation.lock(); + let present = match presentation.as_mut() { + Some(present) => present, None => return Err(SurfaceError::NotConfigured), }; - let device = &mut device_guard[present.device_id.value]; + let device = present.device.downcast_ref::().unwrap(); if !device.is_valid() { return Err(DeviceError::Lost.into()); } #[cfg(feature = "trace")] - if let Some(ref trace) = device.trace { - trace.lock().add(Action::DiscardSurfaceTexture(surface_id)); + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(Action::DiscardSurfaceTexture(surface_id)); } { @@ -405,28 +407,26 @@ impl Global { // and now we are moving it away. log::debug!( "Removing swapchain texture {:?} from the device tracker", - texture_id.value + texture_id ); - device.trackers.lock().textures.remove(texture_id.value); + device.trackers.lock().textures.remove(texture_id); - let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token); + let texture = hub.textures.unregister(texture_id); if let Some(texture) = texture { - texture.clear_mode.destroy_clear_views(&device.raw); - - let suf = A::get_surface_mut(surface); - match texture.inner { - resource::TextureInner::Surface { - raw, - parent_id, + let suf = A::get_surface(&surface); + match *texture.inner_mut().as_mut().take().as_mut().unwrap() { + &mut resource::TextureInner::Surface { + ref mut raw, + ref parent_id, has_work: _, } => { - if surface_id == parent_id.0 { - unsafe { suf.unwrap().raw.discard_texture(raw) }; + if surface_id == *parent_id { + unsafe { suf.unwrap().raw.discard_texture(raw.take().unwrap()) }; } else { log::warn!("Surface texture is outdated"); } } - resource::TextureInner::Native { .. 
} => unreachable!(),
+                    _ => unreachable!(),
                 }
             }
         }
diff --git a/third_party/rust/wgpu-core/src/registry.rs b/third_party/rust/wgpu-core/src/registry.rs
index ef47f7d2447d3..3c93bb31cbf38 100644
--- a/third_party/rust/wgpu-core/src/registry.rs
+++ b/third_party/rust/wgpu-core/src/registry.rs
@@ -1,57 +1,72 @@
-use std::marker::PhantomData;
+use std::sync::Arc;
 
 use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 use wgt::Backend;
 
 use crate::{
-    hub::{Access, Token},
     id,
-    identity::{IdentityHandler, IdentityHandlerFactory},
+    identity::{IdentityHandlerFactory, IdentityManager},
     resource::Resource,
-    storage::Storage,
+    storage::{Element, InvalidId, Storage},
 };
 
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct RegistryReport {
+    pub num_allocated: usize,
+    pub num_kept_from_user: usize,
+    pub num_released_from_user: usize,
+    pub num_destroyed_from_user: usize,
+    pub num_error: usize,
+    pub element_size: usize,
+}
+
+impl RegistryReport {
+    pub fn is_empty(&self) -> bool {
+        self.num_allocated + self.num_kept_from_user == 0
+    }
+}
+
+/// The `Registry` is the primary holder of each resource type.
+/// Every resource is now arcanized, so dropping the last `Arc`
+/// frees the memory and releases the inner raw resource.
+///
+/// The `Registry` acts as the main entry point for keeping a resource
+/// alive between its creation and its release by user-land code.
+///
+/// A resource may still be alive after user-land code has released it
+/// if it is used by an active submission or kept alive by
+/// any other dependent resource.
+///
 #[derive(Debug)]
-pub struct Registry<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> {
-    identity: F::Filter,
-    pub(crate) data: RwLock<Storage<T, I>>,
+pub struct Registry<I: id::TypedId, T: Resource<I>> {
+    identity: Arc<IdentityManager<I>>,
+    storage: RwLock<Storage<T, I>>,
     backend: Backend,
 }
 
-impl<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
-    pub(crate) fn new(backend: Backend, factory: &F) -> Self {
+impl<I: id::TypedId, T: Resource<I>> Registry<I, T> {
+    pub(crate) fn new<F: IdentityHandlerFactory<I>>(backend: Backend, factory: &F) -> Self {
         Self {
             identity: factory.spawn(),
-            data: RwLock::new(Storage {
-                map: Vec::new(),
-                kind: T::TYPE,
-                _phantom: PhantomData,
-            }),
+            storage: RwLock::new(Storage::new()),
             backend,
         }
     }
 
-    pub(crate) fn without_backend(factory: &F, kind: &'static str) -> Self {
-        Self {
-            identity: factory.spawn(),
-            data: RwLock::new(Storage {
-                map: Vec::new(),
-                kind,
-                _phantom: PhantomData,
-            }),
-            backend: Backend::Empty,
-        }
+    pub(crate) fn without_backend<F: IdentityHandlerFactory<I>>(factory: &F) -> Self {
+        Self::new(Backend::Empty, factory)
     }
 }
 
 #[must_use]
-pub(crate) struct FutureId<'a, I: id::TypedId, T> {
+pub(crate) struct FutureId<'a, I: id::TypedId, T: Resource<I>> {
     id: I,
+    identity: Arc<IdentityManager<I>>,
     data: &'a RwLock<Storage<T, I>>,
 }
 
-impl<I: id::TypedId + Copy, T> FutureId<'_, I, T> {
-    #[cfg(feature = "trace")]
+impl<I: id::TypedId, T: Resource<I>> FutureId<'_, I, T> {
+    #[allow(dead_code)]
     pub fn id(&self) -> I {
         self.id
     }
@@ -60,134 +75,98 @@ impl<I: id::TypedId + Copy, T> FutureId<'_, I, T> {
     pub fn into_id(self) -> I {
         self.id
     }
 
-    pub fn assign<'a, A: Access<T>>(self, value: T, _: &'a mut Token<A>) -> id::Valid<I> {
-        self.data.write().insert(self.id, value);
-        id::Valid(self.id)
+    pub fn init(&self, mut value: T) -> Arc<T> {
+        value.as_info_mut().set_id(self.id, &self.identity);
+        Arc::new(value)
     }
 
-    pub fn assign_error<'a, A: Access<T>>(self, label: &str, _: &'a mut Token<A>) -> I {
+    pub fn assign(self, value: T) -> (I, Arc<T>) {
+        let mut data = self.data.write();
+        data.insert(self.id, self.init(value));
+        (self.id, data.get(self.id).unwrap().clone())
+    }
+
+    pub fn assign_existing(self, value: &Arc<T>) -> I {
+        let mut data = self.data.write();
+        #[cfg(debug_assertions)]
+        debug_assert!(!data.contains(self.id));
+        data.insert(self.id, value.clone());
+        self.id
+    }
+
+    pub fn assign_error(self, label: &str) -> I {
         self.data.write().insert_error(self.id, label);
         self.id
     }
 }
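The `FutureId` type above models two-phase creation: `prepare` reserves an id, and the id is later bound either to a live, reference-counted resource (`assign`) or to an error placeholder under the same id (`assign_error`), so that later lookups can report an invalid resource instead of panicking. A toy sketch of the pattern, with a plain `HashMap` standing in for the real `Storage`:

```rust
// Toy version of the reserve-then-assign flow. `prepare` hands out an id
// before the resource exists; the id is later bound to either a live
// resource or an error entry. HashMap stands in for wgpu-core's Storage.
use std::collections::HashMap;
use std::sync::Arc;

enum Element<T> {
    Occupied(Arc<T>),
    Error(String),
}

struct MiniRegistry<T> {
    next_id: u32,
    storage: HashMap<u32, Element<T>>,
}

impl<T> MiniRegistry<T> {
    fn prepare(&mut self) -> u32 {
        let id = self.next_id;
        self.next_id += 1;
        id
    }

    fn assign(&mut self, id: u32, value: T) -> Arc<T> {
        let arc = Arc::new(value);
        self.storage.insert(id, Element::Occupied(arc.clone()));
        arc
    }

    fn assign_error(&mut self, id: u32, label: &str) {
        self.storage.insert(id, Element::Error(label.to_string()));
    }
}

fn main() {
    let mut reg = MiniRegistry { next_id: 0, storage: HashMap::new() };

    let id = reg.prepare();
    let buf = reg.assign(id, vec![0u8; 16]);
    // One strong reference lives in the registry, one in the caller's hand.
    assert_eq!(Arc::strong_count(&buf), 2);

    let bad = reg.prepare();
    // A failed creation binds the reserved id to an error placeholder.
    reg.assign_error(bad, "buffer with unsupported usage");
}
```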
-> I { self.data.write().insert_error(self.id, label); self.id } } -impl> Registry { - pub(crate) fn prepare( - &self, - id_in: >::Input, - ) -> FutureId { +impl> Registry { + pub(crate) fn prepare(&self, id_in: F::Input) -> FutureId + where + F: IdentityHandlerFactory, + { FutureId { - id: self.identity.process(id_in, self.backend), - data: &self.data, + id: if F::autogenerate_ids() { + self.identity.process(self.backend) + } else { + self.identity.mark_as_used(F::input_to_id(id_in)) + }, + identity: self.identity.clone(), + data: &self.storage, } } - - /// Acquire read access to this `Registry`'s contents. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. A `Token` grants - /// permission to lock any field; see [`Token::root`]. - /// - /// Once the read lock is acquired, return a new `Token`, along - /// with a read guard for this `Registry`'s [`Storage`], which can - /// be indexed by id to get at the actual resources. - /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - /// - /// [`Hub`]: crate::hub::Hub - pub(crate) fn read<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockReadGuard<'a, Storage>, Token<'a, T>) { - (self.data.read(), Token::new()) - } - - /// Acquire write access to this `Registry`'s contents. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. A `Token` grants - /// permission to lock any field; see [`Token::root`]. - /// - /// Once the lock is acquired, return a new `Token`, along with - /// a write guard for this `Registry`'s [`Storage`], which can be - /// indexed by id to get at the actual resources. - /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - /// - /// [`Hub`]: crate::hub::Hub - pub(crate) fn write<'a, A: Access>( - &'a self, - _token: &'a mut Token, - ) -> (RwLockWriteGuard<'a, Storage>, Token<'a, T>) { - (self.data.write(), Token::new()) - } - - /// Unregister the resource at `id`. - /// - /// The caller must prove that it already holds a write lock for - /// this `Registry` by passing a mutable reference to this - /// `Registry`'s storage, obtained from the write guard returned - /// by a previous call to [`write`], as the `guard` parameter. - pub fn unregister_locked(&self, id: I, guard: &mut Storage) -> Option { - let value = guard.remove(id); - //Note: careful about the order here! - self.identity.free(id); - //Returning None is legal if it's an error ID - value + pub(crate) fn request(&self) -> FutureId { + FutureId { + id: self.identity.process(self.backend), + identity: self.identity.clone(), + data: &self.storage, + } } - - /// Unregister the resource at `id` and return its value, if any. - /// - /// The caller must present a mutable reference to a `Token`, - /// for some type `A` that comes before this `Registry`'s resource - /// type `T` in the lock ordering. - /// - /// This returns a `Token`, but it's almost useless, because it - /// doesn't return a lock guard to go with it: its only effect is - /// to make the token you passed to this function inaccessible. 
- /// However, the `Token` can be used to satisfy some functions' - /// bureacratic expectations that you will have one available. - /// - /// The borrow checker ensures that the caller cannot again access - /// its `Token` until it has dropped both the guard and the - /// `Token`. - /// - /// See the [`Hub`] type for more details on locking. - /// - /// [`Hub`]: crate::hub::Hub - pub(crate) fn unregister<'a, A: Access>( - &self, - id: I, - _token: &'a mut Token, - ) -> (Option, Token<'a, T>) { - let value = self.data.write().remove(id); - //Note: careful about the order here! - self.identity.free(id); + pub(crate) fn contains(&self, id: I) -> bool { + self.read().contains(id) + } + pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { + self.read().try_get(id).map(|o| o.cloned()) + } + pub(crate) fn get(&self, id: I) -> Result, InvalidId> { + self.read().get(id).map(|v| v.clone()) + } + pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { + self.storage.read() + } + pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage> { + self.storage.write() + } + pub fn unregister_locked(&self, id: I, storage: &mut Storage) -> Option> { + storage.remove(id) + } + pub fn force_replace(&self, id: I, mut value: T) { + let mut storage = self.storage.write(); + value.as_info_mut().set_id(id, &self.identity); + storage.force_replace(id, value) + } + pub fn force_replace_with_error(&self, id: I, label: &str) { + let mut storage = self.storage.write(); + storage.remove(id); + storage.insert_error(id, label); + } + pub(crate) fn unregister(&self, id: I) -> Option> { + let value = self.storage.write().remove(id); //Returning None is legal if it's an error ID - (value, Token::new()) + value } pub fn label_for_resource(&self, id: I) -> String { - let guard = self.data.read(); + let guard = self.storage.read(); - let type_name = guard.kind; + let type_name = guard.kind(); match guard.get(id) { Ok(res) => { let label = res.label(); if label.is_empty() { format!("<{}-{:?}>", type_name, id.unzip()) } else { - label.to_string() + label } } Err(_) => format!( @@ -197,4 +176,22 @@ impl> Registry< ), } } + + pub(crate) fn generate_report(&self) -> RegistryReport { + let storage = self.storage.read(); + let mut report = RegistryReport { + element_size: std::mem::size_of::(), + ..Default::default() + }; + report.num_allocated = self.identity.values.lock().count(); + for element in storage.map.iter() { + match *element { + Element::Occupied(..) => report.num_kept_from_user += 1, + Element::Destroyed(..) => report.num_destroyed_from_user += 1, + Element::Vacant => report.num_released_from_user += 1, + Element::Error(..) 
=> report.num_error += 1, + } + } + report + } } diff --git a/third_party/rust/wgpu-core/src/resource.rs b/third_party/rust/wgpu-core/src/resource.rs index 4ff8f539a8fb9..30deff80c2d5d 100644 --- a/third_party/rust/wgpu-core/src/resource.rs +++ b/third_party/rust/wgpu-core/src/resource.rs @@ -1,29 +1,158 @@ +#[cfg(feature = "trace")] +use crate::device::trace; use crate::{ - device::{DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures}, + device::{ + queue, BufferMapPendingClosure, Device, DeviceError, HostMap, MissingDownlevelFlags, + MissingFeatures, + }, global::Global, hal_api::HalApi, - hub::Token, - id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid}, - identity::GlobalIdentityHandlerFactory, + id::{ + AdapterId, BufferId, DeviceId, QuerySetId, SamplerId, StagingBufferId, SurfaceId, + TextureId, TextureViewId, TypedId, + }, + identity::{GlobalIdentityHandlerFactory, IdentityManager}, init_tracker::{BufferInitTracker, TextureInitTracker}, + resource, track::TextureSelector, validation::MissingBufferUsageError, - Label, LifeGuard, RefCount, Stored, + Label, SubmissionIndex, }; +use hal::CommandEncoder; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use smallvec::SmallVec; use thiserror::Error; +use wgt::WasmNotSendSync; + +use std::{ + borrow::Borrow, + fmt::Debug, + iter, mem, + ops::Range, + ptr::NonNull, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; + +/// Information about the wgpu-core resource. +/// +/// Each type representing a `wgpu-core` resource, like [`Device`] or +/// [`Buffer`], contains a `ResourceInfo` which holds +/// its latest submission index and label. +/// +/// A resource may need to be retained for any of several reasons, +/// and all of that lifetime logic is handled by the `Arc` refcount: +/// +/// - The user may hold a reference to it (via a `wgpu::Buffer`, say). +/// +/// - Other resources may depend on it (a texture view's backing +/// texture, for example). +/// +/// - It may be used by commands sent to the GPU that have not yet +/// finished execution. +/// +/// [`Device`]: crate::device::resource::Device +/// [`Buffer`]: crate::resource::Buffer +#[derive(Debug)] +pub struct ResourceInfo { + id: Option, + identity: Option>>, + /// The index of the last queue submission in which the resource + /// was used. + /// + /// Each queue submission is fenced and assigned an index number + /// sequentially. Thus, when a queue submission completes, we know any + /// resources used in that submission and any lower-numbered submissions are + /// no longer in use by the GPU. + submission_index: AtomicUsize, + + /// The `label` from the descriptor used to create the resource.
+ #[cfg(debug_assertions)] + pub(crate) label: String, +} + +impl Drop for ResourceInfo { + fn drop(&mut self) { + if let Some(identity) = self.identity.as_ref() { + let id = self.id.as_ref().unwrap(); + identity.free(*id); + log::info!("Freeing {:?}", self.label()); + } + } +} + +impl ResourceInfo { + #[allow(unused_variables)] + pub(crate) fn new(label: &str) -> Self { + Self { + id: None, + identity: None, + submission_index: AtomicUsize::new(0), + #[cfg(debug_assertions)] + label: label.to_string(), + } + } + + #[allow(unused_assignments)] + pub(crate) fn label(&self) -> String + where + Id: Debug, + { + let mut label = String::new(); + #[cfg(debug_assertions)] + { + label = format!("[{}] ", self.label); + } + if let Some(id) = self.id.as_ref() { + label.push_str(format!("{:?}", id).as_str()); + } + label + } + + pub(crate) fn id(&self) -> Id { + self.id.unwrap() + } + + pub(crate) fn set_id(&mut self, id: Id, identity: &Arc>) { + self.id = Some(id); + self.identity = Some(identity.clone()); + } + + /// Record that this resource will be used by the queue submission with the + /// given index. + pub(crate) fn use_at(&self, submit_index: SubmissionIndex) { + self.submission_index + .store(submit_index as _, Ordering::Release); + } + + pub(crate) fn submission_index(&self) -> SubmissionIndex { + self.submission_index.load(Ordering::Acquire) as _ + } +} -use std::{borrow::Borrow, ops::Range, ptr::NonNull}; +pub(crate) type ResourceType = &'static str; -pub trait Resource { - const TYPE: &'static str; - fn life_guard(&self) -> &LifeGuard; - fn label(&self) -> &str { +pub trait Resource: 'static + WasmNotSendSync { + const TYPE: ResourceType; + fn as_info(&self) -> &ResourceInfo; + fn as_info_mut(&mut self) -> &mut ResourceInfo; + fn label(&self) -> String { #[cfg(debug_assertions)] - return &self.life_guard().label; + return self.as_info().label.clone(); #[cfg(not(debug_assertions))] - return ""; + return String::new(); + } + fn ref_count(self: &Arc) -> usize { + Arc::strong_count(self) + } + fn is_unique(self: &Arc) -> bool { + self.ref_count() == 1 + } + fn is_equal(&self, other: &Self) -> bool { + self.as_info().id().unzip() == other.as_info().id().unzip() } } @@ -60,15 +189,16 @@ pub enum BufferMapAsyncStatus { InvalidUsageFlags, } -pub(crate) enum BufferMapState { +#[derive(Debug)] +pub(crate) enum BufferMapState { /// Mapped at creation. Init { ptr: NonNull, - stage_buffer: A::Buffer, + stage_buffer: Arc>, needs_flush: bool, }, /// Waiting for GPU to be done before mapping - Waiting(BufferPendingMapping), + Waiting(BufferPendingMapping), /// Mapped Active { ptr: NonNull, @@ -86,7 +216,7 @@ pub(crate) enum BufferMapState { not(target_feature = "atomics") ) ))] -unsafe impl Send for BufferMapState {} +unsafe impl Send for BufferMapState {} #[cfg(any( not(target_arch = "wasm32"), all( @@ -94,7 +224,7 @@ unsafe impl Send for BufferMapState {} not(target_feature = "atomics") ) ))] -unsafe impl Sync for BufferMapState {} +unsafe impl Sync for BufferMapState {} #[repr(C)] pub struct BufferMapCallbackC { @@ -111,10 +241,11 @@ pub struct BufferMapCallbackC { ))] unsafe impl Send for BufferMapCallbackC {} +#[derive(Debug)] pub struct BufferMapCallback { // We wrap this so creating the enum in the C variant can be unsafe, // allowing our call function to be safe. 
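The `use_at`/`submission_index` pair above is the entire synchronization story for submission tracking: a `Release` store when the resource is recorded into a submission, an `Acquire` load when checking whether the GPU might still be using it. As an illustration only (the `SubmissionStamp` type below is hypothetical, not part of wgpu-core), the pattern reduces to:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Hypothetical stand-in for `ResourceInfo`'s submission tracking.
struct SubmissionStamp(AtomicUsize);

impl SubmissionStamp {
    /// Record that the resource is used by submission `index`
    /// (the role played by `ResourceInfo::use_at`).
    fn use_at(&self, index: usize) {
        self.0.store(index, Ordering::Release);
    }

    /// True once the fence for submission `completed` has signalled,
    /// i.e. the GPU can no longer be using this resource.
    fn is_idle(&self, completed: usize) -> bool {
        self.0.load(Ordering::Acquire) <= completed
    }
}

fn main() {
    let stamp = SubmissionStamp(AtomicUsize::new(0));
    stamp.use_at(3); // last used by queue submission #3
    assert!(!stamp.is_idle(2)); // submission #3 still in flight
    assert!(stamp.is_idle(3)); // fence for #3 has signalled
}
```

Because submission indices only grow and completion is observed through the fence, a plain atomic is enough; no lock is ever taken on this path.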
- inner: Option, + inner: BufferMapCallbackInner, } #[cfg(any( @@ -139,10 +270,19 @@ enum BufferMapCallbackInner { C { inner: BufferMapCallbackC }, } +impl Debug for BufferMapCallbackInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match *self { + BufferMapCallbackInner::Rust { callback: _ } => f.debug_struct("Rust").finish(), + BufferMapCallbackInner::C { inner: _ } => f.debug_struct("C").finish(), + } + } +} + impl BufferMapCallback { pub fn from_rust(callback: BufferMapCallbackCallback) -> Self { Self { - inner: Some(BufferMapCallbackInner::Rust { callback }), + inner: BufferMapCallbackInner::Rust { callback }, } } @@ -155,17 +295,17 @@ impl BufferMapCallback { /// invoked, which may happen at an unspecified time. pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self { Self { - inner: Some(BufferMapCallbackInner::C { inner }), + inner: BufferMapCallbackInner::C { inner }, } } - pub(crate) fn call(mut self, result: BufferAccessResult) { - match self.inner.take() { - Some(BufferMapCallbackInner::Rust { callback }) => { + pub(crate) fn call(self, result: BufferAccessResult) { + match self.inner { + BufferMapCallbackInner::Rust { callback } => { callback(result); } // SAFETY: the contract of the call to from_c says that this unsafe is sound. - Some(BufferMapCallbackInner::C { inner }) => unsafe { + BufferMapCallbackInner::C { inner } => unsafe { let status = match result { Ok(()) => BufferMapAsyncStatus::Success, Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, @@ -194,24 +334,14 @@ impl BufferMapCallback { (inner.callback)(status, inner.user_data); }, - None => { - panic!("Map callback invoked twice"); - } - } - } -} - -impl Drop for BufferMapCallback { - fn drop(&mut self) { - if self.inner.is_some() { - panic!("Map callback was leaked"); } } } +#[derive(Debug)] pub struct BufferMapOperation { pub host: HostMap, - pub callback: BufferMapCallback, + pub callback: Option, } #[derive(Clone, Debug, Error)] @@ -263,24 +393,189 @@ pub enum BufferAccessError { } pub type BufferAccessResult = Result<(), BufferAccessError>; -pub(crate) struct BufferPendingMapping { + +#[derive(Debug)] +pub(crate) struct BufferPendingMapping { pub range: Range, pub op: BufferMapOperation, // hold the parent alive while the mapping is active - pub _parent_ref_count: RefCount, + pub _parent_buffer: Arc>, } pub type BufferDescriptor<'a> = wgt::BufferDescriptor>; -pub struct Buffer { +#[derive(Debug)] +pub struct Buffer { pub(crate) raw: Option, - pub(crate) device_id: Stored, + pub(crate) device: Arc>, pub(crate) usage: wgt::BufferUsages, pub(crate) size: wgt::BufferAddress, - pub(crate) initialization_status: BufferInitTracker, - pub(crate) sync_mapped_writes: Option, - pub(crate) life_guard: LifeGuard, - pub(crate) map_state: BufferMapState, + pub(crate) initialization_status: RwLock, + pub(crate) sync_mapped_writes: Mutex>, + pub(crate) info: ResourceInfo, + pub(crate) map_state: Mutex>, +} + +impl Drop for Buffer { + fn drop(&mut self) { + log::info!("Destroying Buffer {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_buffer(raw); + } + } + } +} + +impl Buffer { + pub(crate) fn raw(&self) -> &A::Buffer { + self.raw.as_ref().unwrap() + } + + pub(crate) fn buffer_unmap_inner( + self: &Arc, + ) -> Result, BufferAccessError> { + use hal::Device; + + let device = &self.device; + let buffer_id = self.info.id(); + log::debug!("Buffer {:?} map state -> Idle", buffer_id); + match 
mem::replace(&mut *self.map_state.lock(), resource::BufferMapState::Idle) { + resource::BufferMapState::Init { + ptr, + stage_buffer, + needs_flush, + } => { + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), self.size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: 0..self.size, + queued: true, + }); + } + let _ = ptr; + if needs_flush { + unsafe { + device + .raw() + .flush_mapped_ranges(stage_buffer.raw(), iter::once(0..self.size)); + } + } + + let raw_buf = self.raw.as_ref().ok_or(BufferAccessError::Destroyed)?; + + self.info + .use_at(device.active_submission_index.load(Ordering::Relaxed) + 1); + let region = wgt::BufferSize::new(self.size).map(|size| hal::BufferCopy { + src_offset: 0, + dst_offset: 0, + size, + }); + let transition_src = hal::BufferBarrier { + buffer: stage_buffer.raw(), + usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC, + }; + let transition_dst = hal::BufferBarrier { + buffer: raw_buf, + usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST, + }; + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + let encoder = pending_writes.activate(); + unsafe { + encoder.transition_buffers( + iter::once(transition_src).chain(iter::once(transition_dst)), + ); + if self.size > 0 { + encoder.copy_buffer_to_buffer( + stage_buffer.raw(), + raw_buf, + region.into_iter(), + ); + } + } + pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer)); + pending_writes.dst_buffers.insert(buffer_id, self.clone()); + } + resource::BufferMapState::Idle => { + return Err(BufferAccessError::NotMapped); + } + resource::BufferMapState::Waiting(pending) => { + return Ok(Some((pending.op, Err(BufferAccessError::MapAborted)))); + } + resource::BufferMapState::Active { ptr, range, host } => { + if host == HostMap::Write { + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + let size = range.end - range.start; + let data = trace.make_binary("bin", unsafe { + std::slice::from_raw_parts(ptr.as_ptr(), size as usize) + }); + trace.add(trace::Action::WriteBuffer { + id: buffer_id, + data, + range: range.clone(), + queued: false, + }); + } + let _ = (ptr, range); + } + unsafe { + device + .raw() + .unmap_buffer(self.raw()) + .map_err(DeviceError::from)? + }; + } + } + Ok(None) + } + + pub(crate) fn destroy(self: &Arc) -> Result<(), DestroyError> { + let map_closure; + // Restrict the locks to this scope. 
+ { + let device = &self.device; + let buffer_id = self.info.id(); + + map_closure = self.buffer_unmap_inner().unwrap_or(None); + + #[cfg(feature = "trace")] + if let Some(ref mut trace) = *device.trace.lock() { + trace.add(trace::Action::FreeBuffer(buffer_id)); + } + if self.raw.is_none() { + return Err(resource::DestroyError::AlreadyDestroyed); + } + + let temp = queue::TempResource::Buffer(self.clone()); + let mut pending_writes = device.pending_writes.lock(); + let pending_writes = pending_writes.as_mut().unwrap(); + if pending_writes.dst_buffers.contains_key(&buffer_id) { + pending_writes.temp_resources.push(temp); + } else { + let last_submit_index = self.info.submission_index(); + device + .lock_life() + .schedule_resource_destruction(temp, last_submit_index); + } + } + + // Note: outside the scope where locks are held when calling the callback + if let Some((mut operation, status)) = map_closure { + if let Some(callback) = operation.callback.take() { + callback.call(status); + } + } + + Ok(()) + } } #[derive(Clone, Debug, Error)] @@ -302,11 +597,15 @@ pub enum CreateBufferError { MissingDownlevelFlags(#[from] MissingDownlevelFlags), } -impl Resource for Buffer { - const TYPE: &'static str = "Buffer"; +impl Resource for Buffer { + const TYPE: ResourceType = "Buffer"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } @@ -329,106 +628,172 @@ impl Resource for Buffer { /// [`queue_write_staging_buffer`]: Global::queue_write_staging_buffer /// [`queue_write_texture`]: Global::queue_write_texture /// [`Device::pending_writes`]: crate::device::Device -pub struct StagingBuffer { - pub(crate) raw: A::Buffer, +#[derive(Debug)] +pub struct StagingBuffer { + pub(crate) raw: Mutex>, + pub(crate) device: Arc>, pub(crate) size: wgt::BufferAddress, pub(crate) is_coherent: bool, + pub(crate) info: ResourceInfo, +} + +impl Drop for StagingBuffer { + fn drop(&mut self) { + log::info!("Destroying StagingBuffer {:?}", self.info.label()); + if let Some(raw) = self.raw.lock().take() { + unsafe { + use hal::Device; + self.device.raw().destroy_buffer(raw); + } + } + } } -impl Resource for StagingBuffer { - const TYPE: &'static str = "StagingBuffer"; +impl Resource for StagingBuffer { + const TYPE: ResourceType = "StagingBuffer"; - fn life_guard(&self) -> &LifeGuard { - unreachable!() + fn as_info(&self) -> &ResourceInfo { + &self.info } - fn label(&self) -> &str { - "" + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } + + fn label(&self) -> String { + String::from("") } } pub type TextureDescriptor<'a> = wgt::TextureDescriptor, Vec>; #[derive(Debug)] -pub(crate) enum TextureInner { +pub(crate) enum TextureInner { Native { raw: Option, }, Surface { - raw: A::SurfaceTexture, - parent_id: Valid, - has_work: bool, + raw: Option, + parent_id: SurfaceId, + has_work: AtomicBool, }, } -impl TextureInner { +impl TextureInner { pub fn as_raw(&self) -> Option<&A::Texture> { match *self { Self::Native { raw: Some(ref tex) } => Some(tex), - Self::Native { raw: None } => None, - Self::Surface { ref raw, .. } => Some(raw.borrow()), + Self::Surface { + raw: Some(ref tex), .. 
+ } => Some(tex.borrow()), + _ => None, } } } #[derive(Debug)] -pub enum TextureClearMode { +pub enum TextureClearMode { BufferCopy, // View for clear via RenderPass for every subsurface (mip/layer/slice) RenderPass { - clear_views: SmallVec<[A::TextureView; 1]>, + clear_views: SmallVec<[Option; 1]>, is_color: bool, }, + Surface { + clear_view: Option, + }, // Texture can't be cleared, attempting to do so will cause panic. // (either because it is impossible for the type of texture or it is being destroyed) None, } -impl TextureClearMode { - pub(crate) fn destroy_clear_views(self, device: &A::Device) { - if let TextureClearMode::RenderPass { clear_views, .. } = self { - for clear_view in clear_views { - unsafe { - hal::Device::destroy_texture_view(device, clear_view); - } - } - } - } -} - #[derive(Debug)] -pub struct Texture { - pub(crate) inner: TextureInner, - pub(crate) device_id: Stored, +pub struct Texture { + pub(crate) inner: RwLock>>, + pub(crate) device: Arc>, pub(crate) desc: wgt::TextureDescriptor<(), Vec>, pub(crate) hal_usage: hal::TextureUses, pub(crate) format_features: wgt::TextureFormatFeatures, - pub(crate) initialization_status: TextureInitTracker, + pub(crate) initialization_status: RwLock, pub(crate) full_range: TextureSelector, - pub(crate) life_guard: LifeGuard, - pub(crate) clear_mode: TextureClearMode, + pub(crate) info: ResourceInfo, + pub(crate) clear_mode: RwLock>, } -impl Texture { - pub(crate) fn get_clear_view(&self, mip_level: u32, depth_or_layer: u32) -> &A::TextureView { - match self.clear_mode { +impl Drop for Texture { + fn drop(&mut self) { + log::info!("Destroying Texture {:?}", self.info.label()); + use hal::Device; + let mut clear_mode = self.clear_mode.write(); + let clear_mode = &mut *clear_mode; + match *clear_mode { + TextureClearMode::Surface { + ref mut clear_view, .. + } => { + if let Some(view) = clear_view.take() { + unsafe { + self.device.raw().destroy_texture_view(view); + } + } + } + TextureClearMode::RenderPass { + ref mut clear_views, + .. + } => { + clear_views.iter_mut().for_each(|clear_view| { + if let Some(view) = clear_view.take() { + unsafe { + self.device.raw().destroy_texture_view(view); + } + } + }); + } + _ => {} + }; + if self.inner.read().is_none() { + return; + } + let inner = self.inner.write().take().unwrap(); + if let TextureInner::Native { raw: Some(raw) } = inner { + unsafe { + self.device.raw().destroy_texture(raw); + } + } + } +} + +impl Texture { + pub(crate) fn inner<'a>(&'a self) -> RwLockReadGuard<'a, Option>> { + self.inner.read() + } + pub(crate) fn inner_mut<'a>(&'a self) -> RwLockWriteGuard<'a, Option>> { + self.inner.write() + } + pub(crate) fn get_clear_view<'a>( + clear_mode: &'a TextureClearMode, + desc: &'a wgt::TextureDescriptor<(), Vec>, + mip_level: u32, + depth_or_layer: u32, + ) -> &'a A::TextureView { + match *clear_mode { TextureClearMode::BufferCopy => { panic!("Given texture is cleared with buffer copies, not render passes") } TextureClearMode::None => { panic!("Given texture can't be cleared") } + TextureClearMode::Surface { ref clear_view, .. } => clear_view.as_ref().unwrap(), TextureClearMode::RenderPass { ref clear_views, .. 
} => { - let index = if self.desc.dimension == wgt::TextureDimension::D3 { + let index = if desc.dimension == wgt::TextureDimension::D3 { (0..mip_level).fold(0, |acc, mip| { - acc + (self.desc.size.depth_or_array_layers >> mip).max(1) + acc + (desc.size.depth_or_array_layers >> mip).max(1) }) } else { - mip_level * self.desc.size.depth_or_array_layers + mip_level * desc.size.depth_or_array_layers } + depth_or_layer; - &clear_views[index as usize] + clear_views[index as usize].as_ref().unwrap() } } } @@ -446,10 +811,9 @@ impl Global { profiling::scope!("Texture::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let (guard, _) = hub.textures.read(&mut token); - let texture = guard.try_get(id).ok().flatten(); - let hal_texture = texture.and_then(|tex| tex.inner.as_raw()); + let texture = { hub.textures.try_get(id).ok().flatten() }; + let inner = texture.as_ref().unwrap().inner(); + let hal_texture = inner.as_ref().unwrap().as_raw(); hal_texture_callback(hal_texture); } @@ -465,11 +829,8 @@ impl Global { profiling::scope!("Adapter::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - - let (guard, _) = hub.adapters.read(&mut token); - let adapter = guard.try_get(id).ok().flatten(); - let hal_adapter = adapter.map(|adapter| &adapter.raw.adapter); + let adapter = hub.adapters.try_get(id).ok().flatten(); + let hal_adapter = adapter.as_ref().map(|adapter| &adapter.raw.adapter); hal_adapter_callback(hal_adapter) } @@ -485,29 +846,26 @@ impl Global { profiling::scope!("Device::as_hal"); let hub = A::hub(self); - let mut token = Token::root(); - let (guard, _) = hub.devices.read(&mut token); - let device = guard.try_get(id).ok().flatten(); - let hal_device = device.map(|device| &device.raw); + let device = hub.devices.try_get(id).ok().flatten(); + let hal_device = device.as_ref().map(|device| device.raw()); hal_device_callback(hal_device) } /// # Safety /// - The raw surface handle must not be manually destroyed - pub unsafe fn surface_as_hal_mut) -> R, R>( + pub unsafe fn surface_as_hal) -> R, R>( &self, id: SurfaceId, hal_surface_callback: F, ) -> R { - profiling::scope!("Surface::as_hal_mut"); + profiling::scope!("Surface::as_hal"); - let mut token = Token::root(); - let (mut guard, _) = self.surfaces.write(&mut token); - let surface = guard.get_mut(id).ok(); + let surface = self.surfaces.get(id).ok(); let hal_surface = surface - .and_then(|surface| A::get_surface_mut(surface)) - .map(|surface| &mut surface.raw); + .as_ref() + .and_then(|surface| A::get_surface(surface)) + .map(|surface| &*surface.raw); hal_surface_callback(hal_surface) } @@ -554,6 +912,8 @@ pub enum TextureDimensionError { pub enum CreateTextureError { #[error(transparent)] Device(#[from] DeviceError), + #[error(transparent)] + CreateTextureView(#[from] CreateTextureViewError), #[error("Invalid usage flags {0:?}")] InvalidUsage(wgt::TextureUsages), #[error(transparent)] @@ -589,15 +949,19 @@ pub enum CreateTextureError { MissingDownlevelFlags(#[from] MissingDownlevelFlags), } -impl Resource for Texture { - const TYPE: &'static str = "Texture"; +impl Resource for Texture { + const TYPE: ResourceType = "Texture"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } -impl Borrow for Texture { +impl Borrow for Texture { fn borrow(&self) -> &TextureSelector { &self.full_range } @@ -657,12 +1021,11 @@ pub enum TextureViewNotRenderableReason { } #[derive(Debug)] -pub 
struct TextureView { - pub(crate) raw: A::TextureView, - // The parent's refcount is held alive, but the parent may still be deleted - // if it's a surface texture. TODO: make this cleaner. - pub(crate) parent_id: Stored, - pub(crate) device_id: Stored, +pub struct TextureView { + pub(crate) raw: Option, + // if it's a surface texture - it's none + pub(crate) parent: RwLock>>>, + pub(crate) device: Arc>, //TODO: store device_id for quick access? pub(crate) desc: HalTextureViewDescriptor, pub(crate) format_features: wgt::TextureFormatFeatures, @@ -670,7 +1033,25 @@ pub struct TextureView { pub(crate) render_extent: Result, pub(crate) samples: u32, pub(crate) selector: TextureSelector, - pub(crate) life_guard: LifeGuard, + pub(crate) info: ResourceInfo, +} + +impl Drop for TextureView { + fn drop(&mut self) { + log::info!("Destroying TextureView {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_texture_view(raw); + } + } + } +} + +impl TextureView { + pub(crate) fn raw(&self) -> &A::TextureView { + self.raw.as_ref().unwrap() + } } #[derive(Clone, Debug, Error)] @@ -678,7 +1059,7 @@ pub struct TextureView { pub enum CreateTextureViewError { #[error("Parent texture is invalid or destroyed")] InvalidTexture, - #[error("Not enough memory left")] + #[error("Not enough memory left to create texture view")] OutOfMemory, #[error("Invalid texture view dimension `{view:?}` with texture of dimension `{texture:?}`")] InvalidTextureViewDimension { @@ -724,11 +1105,15 @@ pub enum CreateTextureViewError { #[non_exhaustive] pub enum TextureViewDestroyError {} -impl Resource for TextureView { - const TYPE: &'static str = "TextureView"; +impl Resource for TextureView { + const TYPE: ResourceType = "TextureView"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info } } @@ -763,16 +1148,34 @@ pub struct SamplerDescriptor<'a> { } #[derive(Debug)] -pub struct Sampler { - pub(crate) raw: A::Sampler, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, +pub struct Sampler { + pub(crate) raw: Option, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, /// `true` if this is a comparison sampler pub(crate) comparison: bool, /// `true` if this is a filtering sampler pub(crate) filtering: bool, } +impl Drop for Sampler { + fn drop(&mut self) { + log::info!("Destroying Sampler {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_sampler(raw); + } + } + } +} + +impl Sampler { + pub(crate) fn raw(&self) -> &A::Sampler { + self.raw.as_ref().unwrap() + } +} + #[derive(Copy, Clone)] pub enum SamplerFilterErrorType { MagFilter, @@ -780,7 +1183,7 @@ pub enum SamplerFilterErrorType { MipmapFilter, } -impl std::fmt::Debug for SamplerFilterErrorType { +impl Debug for SamplerFilterErrorType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { SamplerFilterErrorType::MagFilter => write!(f, "magFilter"), @@ -817,11 +1220,15 @@ pub enum CreateSamplerError { MissingFeatures(#[from] MissingFeatures), } -impl Resource for Sampler { - const TYPE: &'static str = "Sampler"; +impl Resource for Sampler { + const TYPE: ResourceType = "Sampler"; - fn life_guard(&self) -> &LifeGuard { - &self.life_guard + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut 
self.info } } @@ -841,18 +1248,40 @@ pub enum CreateQuerySetError { pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor>; #[derive(Debug)] -pub struct QuerySet { - pub(crate) raw: A::QuerySet, - pub(crate) device_id: Stored, - pub(crate) life_guard: LifeGuard, +pub struct QuerySet { + pub(crate) raw: Option, + pub(crate) device: Arc>, + pub(crate) info: ResourceInfo, pub(crate) desc: wgt::QuerySetDescriptor<()>, } -impl Resource for QuerySet { - const TYPE: &'static str = "QuerySet"; +impl Drop for QuerySet { + fn drop(&mut self) { + log::info!("Destroying QuerySet {:?}", self.info.label()); + if let Some(raw) = self.raw.take() { + unsafe { + use hal::Device; + self.device.raw().destroy_query_set(raw); + } + } + } +} + +impl Resource for QuerySet { + const TYPE: ResourceType = "QuerySet"; + + fn as_info(&self) -> &ResourceInfo { + &self.info + } + + fn as_info_mut(&mut self) -> &mut ResourceInfo { + &mut self.info + } +} - fn life_guard(&self) -> &LifeGuard { - &self.life_guard +impl QuerySet { + pub(crate) fn raw(&self) -> &A::QuerySet { + self.raw.as_ref().unwrap() } } diff --git a/third_party/rust/wgpu-core/src/storage.rs b/third_party/rust/wgpu-core/src/storage.rs index 09d93d637d0e5..0135f6ba49fe6 100644 --- a/third_party/rust/wgpu-core/src/storage.rs +++ b/third_party/rust/wgpu-core/src/storage.rs @@ -1,8 +1,8 @@ -use std::{marker::PhantomData, mem, ops}; +use std::{marker::PhantomData, ops, sync::Arc}; use wgt::Backend; -use crate::{id, Epoch, Index}; +use crate::{id, resource::Resource, Epoch, Index}; /// An entry in a `Storage::map` table. #[derive(Debug)] @@ -12,11 +12,11 @@ pub(crate) enum Element { /// There is one live id with this index, allocated at the given /// epoch. - Occupied(T, Epoch), + Occupied(Arc, Epoch), /// Like `Occupied`, but the resource has been marked as destroyed /// and hasn't been dropped yet. - Destroyed(T, Epoch), + Destroyed(Arc, Epoch), /// Like `Occupied`, but an error occurred when creating the /// resource. @@ -25,20 +25,6 @@ pub(crate) enum Element { Error(Epoch, String), } -#[derive(Clone, Debug, Default)] -pub struct StorageReport { - pub num_occupied: usize, - pub num_vacant: usize, - pub num_error: usize, - pub element_size: usize, -} - -impl StorageReport { - pub fn is_empty(&self) -> bool { - self.num_occupied + self.num_vacant + self.num_error == 0 - } -} - #[derive(Clone, Debug)] pub(crate) struct InvalidId; @@ -48,26 +34,46 @@ pub(crate) struct InvalidId; /// values, so you should use an id allocator like `IdentityManager` /// that keeps the index values dense and close to zero. 
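Before the `Storage` rewrite below, it may help to spell out the index/epoch scheme that `unzip` returns throughout this patch. This is a simplified model only; `FakeId` and `Slot` are illustrative names, not the actual `wgpu_core::id` packing:

```rust
/// Simplified model of a typed id: a dense slot index plus an epoch
/// that is bumped each time the slot is reused.
#[derive(Copy, Clone, Debug)]
struct FakeId {
    index: u32,
    epoch: u32,
}

struct Slot<T> {
    value: Option<T>,
    epoch: u32,
}

/// Look up `id`, rejecting stale ids whose epoch no longer matches
/// (the case `Storage::get` reports as "no longer alive").
fn get<T>(slots: &[Slot<T>], id: FakeId) -> Option<&T> {
    let slot = slots.get(id.index as usize)?;
    if slot.epoch != id.epoch {
        return None;
    }
    slot.value.as_ref()
}

fn main() {
    let slots = vec![Slot { value: Some("buffer"), epoch: 2 }];
    assert!(get(&slots, FakeId { index: 0, epoch: 2 }).is_some());
    assert!(get(&slots, FakeId { index: 0, epoch: 1 }).is_none()); // stale id
}
```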
#[derive(Debug)] -pub struct Storage { +pub struct Storage +where + T: Resource, + I: id::TypedId, +{ pub(crate) map: Vec>, - pub(crate) kind: &'static str, - pub(crate) _phantom: PhantomData, + kind: &'static str, + _phantom: PhantomData, } -impl ops::Index> for Storage { - type Output = T; - fn index(&self, id: id::Valid) -> &T { - self.get(id.0).unwrap() +impl ops::Index for Storage +where + T: Resource, + I: id::TypedId, +{ + type Output = Arc; + fn index(&self, id: I) -> &Arc { + self.get(id).unwrap() } } - -impl ops::IndexMut> for Storage { - fn index_mut(&mut self, id: id::Valid) -> &mut T { - self.get_mut(id.0).unwrap() +impl Storage +where + T: Resource, + I: id::TypedId, +{ + pub(crate) fn new() -> Self { + Self { + map: Vec::new(), + kind: T::TYPE, + _phantom: PhantomData, + } } } -impl Storage { +impl Storage +where + T: Resource, + I: id::TypedId, +{ + #[allow(dead_code)] pub(crate) fn contains(&self, id: I) -> bool { let (index, epoch, _) = id.unzip(); match self.map.get(index as usize) { @@ -88,7 +94,7 @@ impl Storage { /// This function is primarily intended for the `as_hal` family of functions /// where you may need to fallibly get a object backed by an id that could /// be in a different hub. - pub(crate) fn try_get(&self, id: I) -> Result, InvalidId> { + pub(crate) fn try_get(&self, id: I) -> Result>, InvalidId> { let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch), @@ -100,96 +106,31 @@ impl Storage { }; assert_eq!( epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index + "{}[{:?}] is no longer alive", + self.kind, id ); result } /// Get a reference to an item behind a potentially invalid ID. /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> { + pub(crate) fn get(&self, id: I) -> Result<&Arc, InvalidId> { let (index, epoch, _) = id.unzip(); let (result, storage_epoch) = match self.map.get(index as usize) { Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index), + Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), Some(&Element::Destroyed(.., epoch)) => (Err(InvalidId), epoch), None => return Err(InvalidId), }; assert_eq!( epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Get a mutable reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get_mut(index as usize) { - Some(&mut Element::Occupied(ref mut v, epoch)) => (Ok(v), epoch), - Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), - Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - Some(&mut Element::Destroyed(.., epoch)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - /// Like `get_mut`, but returns the element even if it is destroyed. - /// - /// In practice, most API entry points should use `get`/`get_mut` so that a - /// destroyed resource leads to a validation error. 
This should be used internally - /// in places where we want to do some manipulation potentially after the element - /// was destroyed (for example the drop implementation). - pub(crate) fn get_occupied_or_destroyed_mut(&mut self, id: I) -> Result<&mut T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get_mut(index as usize) { - Some(&mut Element::Occupied(ref mut v, epoch)) - | Some(&mut Element::Destroyed(ref mut v, epoch)) => (Ok(v), epoch), - Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), - Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index + "{}[{:?}] is no longer alive", + self.kind, id ); result } - pub(crate) fn get_occupied_or_destroyed(&self, id: I) -> Result<&T, InvalidId> { - let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) | Some(&Element::Destroyed(ref v, epoch)) => { - (Ok(v), epoch) - } - Some(&Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index), - Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{}] is no longer alive", - self.kind, index - ); - result - } - - pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T { - match self.map[id as usize] { - Element::Occupied(ref v, _) => v, - Element::Vacant => panic!("{}[{}] does not exist", self.kind, id), - Element::Error(_, _) | Element::Destroyed(..) => panic!(""), - } - } - pub(crate) fn label_for_invalid_id(&self, id: I) -> &str { let (index, _, _) = id.unzip(); match self.map.get(index as usize) { @@ -198,27 +139,56 @@ impl Storage { } } - fn insert_impl(&mut self, index: usize, element: Element) { + fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element) { if index >= self.map.len() { self.map.resize_with(index + 1, || Element::Vacant); } match std::mem::replace(&mut self.map[index], element) { Element::Vacant => {} - _ => panic!("Index {index:?} is already occupied"), + Element::Destroyed(_, storage_epoch) => { + assert_ne!( + epoch, + storage_epoch, + "Index {index:?} of {} is already occupied", + T::TYPE + ); + } + Element::Occupied(_, storage_epoch) => { + assert_ne!( + epoch, + storage_epoch, + "Index {index:?} of {} is already occupied", + T::TYPE + ); + } + Element::Error(storage_epoch, _) => { + assert_ne!( + epoch, + storage_epoch, + "Index {index:?} of {} is already occupied with Error", + T::TYPE + ); + } } } - pub(crate) fn insert(&mut self, id: I, value: T) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Occupied(value, epoch)) + pub(crate) fn insert(&mut self, id: I, value: Arc) { + log::info!("User is inserting {}{:?}", T::TYPE, id); + let (index, epoch, _backend) = id.unzip(); + self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch)) } pub(crate) fn insert_error(&mut self, id: I, label: &str) { + log::info!("User is inserting as error {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, Element::Error(epoch, label.to_string())) + self.insert_impl( + index as usize, + epoch, + Element::Error(epoch, label.to_string()), + ) } - pub(crate) fn replace_with_error(&mut self, id: I) -> Result { + pub(crate) fn replace_with_error(&mut self, id: I) -> Result, InvalidId> { let (index, epoch, _) = id.unzip(); match std::mem::replace(
&mut self.map[index as usize], @@ -233,7 +203,7 @@ impl Storage { } } - pub(crate) fn get_and_mark_destroyed(&mut self, id: I) -> Result<&mut T, InvalidId> { + pub(crate) fn get_and_mark_destroyed(&mut self, id: I) -> Result, InvalidId> { let (index, epoch, _) = id.unzip(); let slot = &mut self.map[index as usize]; // borrowck dance: we have to move the element out before we can replace it @@ -247,19 +217,21 @@ impl Storage { } } - if let Element::Destroyed(ref mut value, ..) = *slot { - Ok(value) + if let Element::Destroyed(ref value, ..) = *slot { + Ok(value.clone()) } else { Err(InvalidId) } } pub(crate) fn force_replace(&mut self, id: I, value: T) { + log::info!("User is replacing {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); - self.map[index as usize] = Element::Occupied(value, epoch); + self.map[index as usize] = Element::Occupied(Arc::new(value), epoch); } - pub(crate) fn remove(&mut self, id: I) -> Option { + pub(crate) fn remove(&mut self, id: I) -> Option> { + log::info!("User is removing {}{:?}", T::TYPE, id); let (index, epoch, _) = id.unzip(); match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { Element::Occupied(value, storage_epoch) | Element::Destroyed(value, storage_epoch) => { @@ -271,22 +243,7 @@ impl Storage { } } - // Prevents panic on out of range access, allows Vacant elements. - pub(crate) fn _try_remove(&mut self, id: I) -> Option { - let (index, epoch, _) = id.unzip(); - if index as usize >= self.map.len() { - None - } else if let Element::Occupied(value, storage_epoch) = - std::mem::replace(&mut self.map[index as usize], Element::Vacant) - { - assert_eq!(epoch, storage_epoch); - Some(value) - } else { - None - } - } - - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator { + pub(crate) fn iter(&self, backend: Backend) -> impl Iterator)> { self.map .iter() .enumerate() @@ -298,22 +255,11 @@ impl Storage { }) } - pub(crate) fn len(&self) -> usize { - self.map.len() + pub(crate) fn kind(&self) -> &str { + self.kind } - pub(crate) fn generate_report(&self) -> StorageReport { - let mut report = StorageReport { - element_size: mem::size_of::(), - ..Default::default() - }; - for element in self.map.iter() { - match *element { - Element::Occupied(..) | Element::Destroyed(..) => report.num_occupied += 1, - Element::Vacant => report.num_vacant += 1, - Element::Error(..) => report.num_error += 1, - } - } - report + pub(crate) fn len(&self) -> usize { + self.map.len() } } diff --git a/third_party/rust/wgpu-core/src/track/buffer.rs b/third_party/rust/wgpu-core/src/track/buffer.rs index b5e61c5a224ab..932993681a0fe 100644 --- a/third_party/rust/wgpu-core/src/track/buffer.rs +++ b/third_party/rust/wgpu-core/src/track/buffer.rs @@ -5,21 +5,21 @@ * one subresource, they have no selector. 
!*/ -use std::{borrow::Cow, marker::PhantomData, vec::Drain}; +use std::{borrow::Cow, marker::PhantomData, sync::Arc}; -use super::PendingTransition; +use super::{PendingTransition, ResourceTracker}; use crate::{ hal_api::HalApi, - id::{BufferId, TypedId, Valid}, - resource::Buffer, - storage, + id::{BufferId, TypedId}, + resource::{Buffer, Resource}, + storage::Storage, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, - LifeGuard, RefCount, }; -use hal::BufferUses; +use hal::{BufferBarrier, BufferUses}; +use parking_lot::Mutex; use wgt::{strict_assert, strict_assert_eq}; impl ResourceUses for BufferUses { @@ -42,15 +42,16 @@ impl ResourceUses for BufferUses { } /// Stores all the buffers that a bind group stores. +#[derive(Debug)] pub(crate) struct BufferBindGroupState { - buffers: Vec<(Valid, RefCount, BufferUses)>, + buffers: Mutex>, BufferUses)>>, _phantom: PhantomData, } impl BufferBindGroupState { pub fn new() -> Self { Self { - buffers: Vec::new(), + buffers: Mutex::new(Vec::new()), _phantom: PhantomData, } @@ -60,27 +61,44 @@ impl BufferBindGroupState { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant ascending order. - pub(crate) fn optimize(&mut self) { - self.buffers - .sort_unstable_by_key(|&(id, _, _)| id.0.unzip().0); + #[allow(clippy::pattern_type_mismatch)] + pub(crate) fn optimize(&self) { + let mut buffers = self.buffers.lock(); + buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0); + } + + /// Returns a list of all buffers tracked. May contain duplicates. + #[allow(clippy::pattern_type_mismatch)] + pub fn used_ids(&self) -> impl Iterator + '_ { + let buffers = self.buffers.lock(); + buffers + .iter() + .map(|(ref b, _)| b.as_info().id()) + .collect::>() + .into_iter() } /// Returns a list of all buffers tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { - self.buffers.iter().map(|&(id, _, _)| id) + pub fn drain_resources(&self) -> impl Iterator>> + '_ { + let mut buffers = self.buffers.lock(); + buffers + .drain(..) + .map(|(buffer, _u)| buffer) + .collect::>() + .into_iter() } /// Adds the given resource with the given state. pub fn add_single<'a>( - &mut self, - storage: &'a storage::Storage, BufferId>, + &self, + storage: &'a Storage, BufferId>, id: BufferId, state: BufferUses, - ) -> Option<&'a Buffer> { + ) -> Option<&'a Arc>> { let buffer = storage.get(id).ok()?; - self.buffers - .push((Valid(id), buffer.life_guard.add_ref(), state)); + let mut buffers = self.buffers.lock(); + buffers.push((buffer.clone(), state)); Some(buffer) } @@ -91,7 +109,7 @@ impl BufferBindGroupState { pub(crate) struct BufferUsageScope { state: Vec, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, } impl BufferUsageScope { @@ -124,9 +142,25 @@ impl BufferUsageScope { } } - /// Returns a list of all buffers tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + /// Drains all buffers tracked.
+ pub fn drain_resources(&mut self) -> impl Iterator>> + '_ { + let resources = self.metadata.drain_resources(); + self.state.clear(); + resources.into_iter() + } + + pub fn get(&self, id: BufferId) -> Option<&Arc>> { + let index = id.unzip().0 as usize; + if index > self.metadata.size() { + return None; + } + self.tracker_assert_in_bounds(index); + unsafe { + if self.metadata.contains_unchecked(index) { + return Some(self.metadata.get_resource_unchecked(index)); + } + } + None } /// Merge the list of buffer states in the given bind group into this usage scope. @@ -145,22 +179,20 @@ impl BufferUsageScope { &mut self, bind_group: &BufferBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref ref_count, state) in &bind_group.buffers { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let buffers = bind_group.buffers.lock(); + for &(ref resource, state) in &*buffers { + let index = resource.as_info().id().unzip().0 as usize; unsafe { insert_or_merge( - None, None, &mut self.state, &mut self.metadata, - index32, + index as _, index, BufferStateProvider::Direct { state }, ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), + resource: Cow::Borrowed(resource), }, )? }; @@ -188,7 +220,6 @@ impl BufferUsageScope { unsafe { insert_or_merge( - None, None, &mut self.state, &mut self.metadata, @@ -216,16 +247,15 @@ impl BufferUsageScope { /// the vectors will be extended. A call to set_size is not needed. pub fn merge_single<'a>( &mut self, - storage: &'a storage::Storage, BufferId>, + storage: &'a Storage, BufferId>, id: BufferId, new_state: BufferUses, - ) -> Result<&'a Buffer, UsageConflict> { + ) -> Result<&'a Arc>, UsageConflict> { let buffer = storage .get(id) .map_err(|_| UsageConflict::BufferInvalid { id })?; - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = id.unzip().0 as usize; self.allow_index(index); @@ -233,14 +263,15 @@ impl BufferUsageScope { unsafe { insert_or_merge( - Some(&buffer.life_guard), None, &mut self.state, &mut self.metadata, - index32, + index as _, index, BufferStateProvider::Direct { state: new_state }, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Direct { + resource: Cow::Owned(buffer.clone()), + }, )?; } @@ -248,15 +279,74 @@ impl BufferUsageScope { } } +pub(crate) type SetSingleResult = + Option<(Arc>, Option>)>; + /// Stores all buffer state within a command buffer or device. pub(crate) struct BufferTracker { start: Vec, end: Vec, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, temp: Vec>, } + +impl ResourceTracker> for BufferTracker { + /// Removes the buffer `id` from this tracker if it is otherwise unused. + /// + /// A buffer is 'otherwise unused' when the only references to it are: + /// + /// 1) the `Arc` that our caller, `LifetimeTracker::triage_suspected`, has just + /// drained from `LifetimeTracker::suspected_resources`, + /// + /// 2) its `Arc` in [`self.metadata`] (owned by [`Device::trackers`]), and + /// + /// 3) its `Arc` in the [`Hub::buffers`] registry. + /// + /// If the buffer is indeed unused, this function removes 2), and + /// `triage_suspected` will remove 3), leaving 1) as the sole + /// remaining reference. + /// + /// Return `true` if this tracker contained the buffer `id`. This + /// implies that we removed it. 
+ /// + /// [`Device::trackers`]: crate::device::Device + /// [`self.metadata`]: BufferTracker::metadata + /// [`Hub::buffers`]: crate::hub::Hub::buffers + fn remove_abandoned(&mut self, id: BufferId, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + // A ref count of 2 means the resource is held only by the device + // tracker and the suspected-resources list itself: the user has + // already released it, so it no longer appears in the registry. + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.metadata.remove(index); + log::info!("Buffer {:?} is not tracked anymore", id,); + return true; + } else { + log::info!( + "Buffer {:?} is still referenced from {}", + id, + existing_ref_count + ); + } + } + } + + false + } +} + impl BufferTracker { pub fn new() -> Self { Self { @@ -294,13 +384,17 @@ impl BufferTracker { } /// Returns a list of all buffers tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } /// Drains all currently pending transitions. - pub fn drain(&mut self) -> Drain<'_, PendingTransition> { - self.temp.drain(..) + pub fn drain_transitions(&mut self) -> impl Iterator> { + let buffer_barriers = self.temp.drain(..).map(|pending| { + let buf = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; + pending.into_hal(buf) + }); + buffer_barriers } /// Inserts a single buffer and its state into the resource tracker. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: Valid, ref_count: RefCount, state: BufferUses) { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + pub fn insert_single(&mut self, id: BufferId, resource: Arc>, state: BufferUses) { + let index = id.unzip().0 as usize; self.allow_index(index); @@ -325,7 +418,6 @@ impl BufferTracker { } insert( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, @@ -333,8 +425,7 @@ impl BufferTracker { BufferStateProvider::Direct { state }, None, ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Owned(ref_count), + resource: Cow::Owned(resource), }, ) } @@ -347,16 +438,8 @@ impl BufferTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed.
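Distilled to its core, `remove_abandoned` above is an `Arc::strong_count` threshold test. A minimal standalone sketch, with a plain slice of `Option<Arc<T>>` standing in for `ResourceMetadata` (all names here are illustrative, not the actual wgpu-core API):

```rust
use std::sync::Arc;

/// Drop the tracker's `Arc` for `index` if every other strong reference
/// is already accountedted for by `external_count`.
fn remove_if_abandoned<T>(
    metadata: &mut [Option<Arc<T>>],
    index: usize,
    external_count: usize,
) -> bool {
    let abandoned = match metadata.get(index) {
        // One reference is the slot's own; the rest must all be covered
        // by `external_count` for the resource to count as abandoned.
        Some(Some(resource)) => Arc::strong_count(resource) <= 1 + external_count,
        _ => return false,
    };
    if abandoned {
        metadata[index] = None; // release the tracker's reference
    }
    abandoned
}

fn main() {
    let res = Arc::new("buffer");
    let mut metadata = vec![Some(res.clone())];
    // Two strong refs total (`res` plus the slot) and external_count = 1,
    // so 2 <= 1 + 1: the tracker lets go of its reference.
    assert!(remove_if_abandoned(&mut metadata, 0, 1));
    assert_eq!(Arc::strong_count(&res), 1);
}
```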
- pub fn set_single<'a>( - &mut self, - storage: &'a storage::Storage, BufferId>, - id: BufferId, - state: BufferUses, - ) -> Option<(&'a Buffer, Option>)> { - let value = storage.get(id).ok()?; - - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + pub fn set_single(&mut self, buffer: &Arc>, state: BufferUses) -> SetSingleResult { + let index: usize = buffer.as_info().id().unzip().0 as usize; self.allow_index(index); @@ -364,29 +447,29 @@ impl BufferTracker { unsafe { insert_or_barrier_update( - Some(&value.life_guard), Some(&mut self.start), &mut self.end, &mut self.metadata, - index32, index, BufferStateProvider::Direct { state }, None, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Direct { + resource: Cow::Owned(buffer.clone()), + }, &mut self.temp, ) }; strict_assert!(self.temp.len() <= 1); - Some((value, self.temp.pop())) + Some((buffer.clone(), self.temp.pop())) } /// Sets the given state for all buffers in the given tracker. /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. @@ -401,11 +484,9 @@ impl BufferTracker { tracker.tracker_assert_in_bounds(index); unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &tracker.start, @@ -426,7 +507,7 @@ impl BufferTracker { /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. @@ -441,11 +522,9 @@ impl BufferTracker { scope.tracker_assert_in_bounds(index); unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &scope.state, @@ -466,7 +545,7 @@ impl BufferTracker { /// /// If a transition is needed to get the buffers into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. 
/// /// This is a really funky method used by Compute Passes to generate /// barriers after a call to dispatch without needing to iterate @@ -481,7 +560,7 @@ impl BufferTracker { pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, scope: &mut BufferUsageScope, - id_source: impl IntoIterator>, + id_source: impl IntoIterator, ) { let incoming_size = scope.state.len(); if incoming_size > self.start.len() { @@ -489,7 +568,7 @@ impl BufferTracker { } for id in id_source { - let (index32, _, _) = id.0.unzip(); + let (index32, _, _) = id.unzip(); let index = index32 as usize; scope.tracker_assert_in_bounds(index); @@ -499,11 +578,9 @@ impl BufferTracker { } unsafe { insert_or_barrier_update( - None, Some(&mut self.start), &mut self.end, &mut self.metadata, - index as u32, index, BufferStateProvider::Indirect { state: &scope.state, @@ -520,36 +597,19 @@ impl BufferTracker { } } - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. - /// - /// Returns true if the resource was removed. - /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. - pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; - + #[allow(dead_code)] + pub fn get(&self, id: BufferId) -> Option<&Arc>> { + let index = id.unzip().0 as usize; if index > self.metadata.size() { - return false; + return None; } - self.tracker_assert_in_bounds(index); - unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count.load() == 1 { - self.metadata.remove(index); - return true; - } + return Some(self.metadata.get_resource_unchecked(index)); } } - - false + None } } @@ -590,21 +650,19 @@ impl BufferStateProvider<'_> { /// to this function, either directly or via metadata or provider structs. #[inline(always)] unsafe fn insert_or_merge( - life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index32: u32, index: usize, state_provider: BufferStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) -> Result<(), UsageConflict> { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { unsafe { insert( - life_guard, start_states, current_states, resource_metadata, @@ -647,15 +705,13 @@ unsafe fn insert_or_merge( /// to this function, either directly or via metadata or provider structs. 
#[inline(always)] unsafe fn insert_or_barrier_update( - life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, - index32: u32, + resource_metadata: &mut ResourceMetadata>, index: usize, start_state_provider: BufferStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, barriers: &mut Vec>, ) { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; @@ -663,7 +719,6 @@ unsafe fn insert_or_barrier_update( if !currently_owned { unsafe { insert( - life_guard, start_states, current_states, resource_metadata, @@ -677,29 +732,20 @@ unsafe fn insert_or_barrier_update( } let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); - unsafe { - barrier( - current_states, - index32, - index, - start_state_provider, - barriers, - ) - }; + unsafe { barrier(current_states, index, start_state_provider, barriers) }; unsafe { update(current_states, index, update_state_provider) }; } #[inline(always)] unsafe fn insert( - life_guard: Option<&LifeGuard>, start_states: Option<&mut [BufferUses]>, current_states: &mut [BufferUses], - resource_metadata: &mut ResourceMetadata, + resource_metadata: &mut ResourceMetadata>, index: usize, start_state_provider: BufferStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) { let new_start_state = unsafe { start_state_provider.get_state(index) }; let new_end_state = @@ -718,8 +764,8 @@ unsafe fn insert( } *current_states.get_unchecked_mut(index) = new_end_state; - let (epoch, ref_count) = metadata_provider.get_own(life_guard, index); - resource_metadata.insert(index, epoch, ref_count); + let resource = metadata_provider.get_own(index); + resource_metadata.insert(index, resource); } } @@ -729,7 +775,7 @@ unsafe fn merge( index32: u32, index: usize, state_provider: BufferStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, BufferId, Buffer>, ) -> Result<(), UsageConflict> { let current_state = unsafe { current_states.get_unchecked_mut(index) }; let new_state = unsafe { state_provider.get_state(index) }; @@ -758,7 +804,6 @@ unsafe fn merge( #[inline(always)] unsafe fn barrier( current_states: &mut [BufferUses], - index32: u32, index: usize, state_provider: BufferStateProvider<'_>, barriers: &mut Vec>, @@ -771,12 +816,12 @@ unsafe fn barrier( } barriers.push(PendingTransition { - id: index32, + id: index as _, selector: (), usage: current_state..new_state, }); - log::trace!("\tbuf {index32}: transition {current_state:?} -> {new_state:?}"); + log::trace!("\tbuf {index}: transition {current_state:?} -> {new_state:?}"); } #[inline(always)] diff --git a/third_party/rust/wgpu-core/src/track/metadata.rs b/third_party/rust/wgpu-core/src/track/metadata.rs index 8561d93bee03d..8001776e8c6e9 100644 --- a/third_party/rust/wgpu-core/src/track/metadata.rs +++ b/third_party/rust/wgpu-core/src/track/metadata.rs @@ -1,15 +1,11 @@ //! The `ResourceMetadata` type. 
-use crate::{ - hal_api::HalApi, - id::{self, TypedId}, - Epoch, LifeGuard, RefCount, -}; +use crate::{hal_api::HalApi, id::TypedId, resource::Resource, Epoch}; use bit_vec::BitVec; -use std::{borrow::Cow, marker::PhantomData, mem}; +use std::{borrow::Cow, marker::PhantomData, mem, sync::Arc}; use wgt::strict_assert; -/// A set of resources, holding a [`RefCount`] and epoch for each member. +/// A set of resources, holding a `Arc` and epoch for each member. /// /// Testing for membership is fast, and iterating over members is /// reasonably fast in practice. Storage consumption is proportional @@ -17,27 +13,22 @@ use wgt::strict_assert; /// members, but a bit vector tracks occupancy, so iteration touches /// only occupied elements. #[derive(Debug)] -pub(super) struct ResourceMetadata { +pub(super) struct ResourceMetadata> { /// If the resource with index `i` is a member, `owned[i]` is `true`. owned: BitVec, - /// A vector parallel to `owned`, holding clones of members' `RefCount`s. - ref_counts: Vec>, - - /// A vector parallel to `owned`, holding the epoch of each members' id. - epochs: Vec, + /// A vector holding clones of members' `T`s. + resources: Vec>>, /// This tells Rust that this type should be covariant with `A`. - _phantom: PhantomData, + _phantom: PhantomData<(A, I)>, } -impl ResourceMetadata { +impl> ResourceMetadata { pub(super) fn new() -> Self { Self { owned: BitVec::default(), - ref_counts: Vec::new(), - epochs: Vec::new(), - + resources: Vec::new(), _phantom: PhantomData, } } @@ -48,9 +39,7 @@ impl ResourceMetadata { } pub(super) fn set_size(&mut self, size: usize) { - self.ref_counts.resize(size, None); - self.epochs.resize(size, u32::MAX); - + self.resources.resize(size, None); resize_bitvec(&mut self.owned, size); } @@ -61,11 +50,9 @@ impl ResourceMetadata { #[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))] pub(super) fn tracker_assert_in_bounds(&self, index: usize) { strict_assert!(index < self.owned.len()); - strict_assert!(index < self.ref_counts.len()); - strict_assert!(index < self.epochs.len()); - + strict_assert!(index < self.resources.len()); strict_assert!(if self.contains(index) { - self.ref_counts[index].is_some() + self.resources[index].is_some() } else { true }); @@ -104,52 +91,73 @@ impl ResourceMetadata { /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, ref_count: RefCount) { + pub(super) unsafe fn insert(&mut self, index: usize, resource: Arc) { self.owned.set(index, true); unsafe { - *self.epochs.get_unchecked_mut(index) = epoch; - *self.ref_counts.get_unchecked_mut(index) = Some(ref_count); + *self.resources.get_unchecked_mut(index) = Some(resource); } } - /// Get the [`RefCount`] of the resource with the given index. + /// Get the resource with the given index. /// /// # Safety /// /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> &RefCount { + pub(super) unsafe fn get_resource_unchecked(&self, index: usize) -> &Arc { unsafe { - self.ref_counts + self.resources .get_unchecked(index) .as_ref() .unwrap_unchecked() } } - /// Get the [`Epoch`] of the id of the resource with the given index. + /// Get the reference count of the resource with the given index. 
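The reshaped `ResourceMetadata` above keeps one occupancy bit plus one optional `Arc` clone per index, in place of the old parallel `ref_counts` and `epochs` vectors. A standalone sketch of that layout, with `Vec<bool>` standing in for `bit_vec::BitVec`:

use std::sync::Arc;

struct ResourceMetadata<T> {
    owned: Vec<bool>,              // occupancy mask
    resources: Vec<Option<Arc<T>>>, // parallel table of owned clones
}

impl<T> ResourceMetadata<T> {
    fn new() -> Self {
        Self { owned: Vec::new(), resources: Vec::new() }
    }

    fn set_size(&mut self, size: usize) {
        self.owned.resize(size, false);
        self.resources.resize_with(size, || None);
    }

    fn insert(&mut self, index: usize, resource: Arc<T>) {
        self.owned[index] = true;
        self.resources[index] = Some(resource);
    }

    fn contains(&self, index: usize) -> bool {
        self.owned[index]
    }

    /// Iterate only occupied slots, cloning the stored Arcs.
    fn owned_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
        self.owned
            .iter()
            .enumerate()
            .filter(|&(_, &o)| o)
            .map(move |(i, _)| self.resources[i].as_ref().unwrap().clone())
    }
}

fn main() {
    let mut m = ResourceMetadata::new();
    m.set_size(8);
    m.insert(3, Arc::new("texture"));
    assert!(m.contains(3));
    assert_eq!(m.owned_resources().count(), 1);
}

Iteration touches only occupied slots, which is what keeps `owned_resources` and `drain_resources` cheap even when the tables are mostly empty.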
/// /// # Safety /// /// The given `index` must be in bounds for this `ResourceMetadata`'s /// existing tables. See `tracker_assert_in_bounds`. #[inline(always)] - pub(super) unsafe fn get_epoch_unchecked(&self, index: usize) -> Epoch { - unsafe { *self.epochs.get_unchecked(index) } + pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> usize { + unsafe { + Arc::strong_count( + self.resources + .get_unchecked(index) + .as_ref() + .unwrap_unchecked(), + ) + } } - /// Returns an iterator over the ids for all resources owned by `self`. - pub(super) fn owned_ids(&self) -> impl Iterator> + '_ { + /// Returns an iterator over the resources owned by `self`. + pub(super) fn owned_resources(&self) -> impl Iterator> + '_ { if !self.owned.is_empty() { self.tracker_assert_in_bounds(self.owned.len() - 1) }; iterate_bitvec_indices(&self.owned).map(move |index| { - let epoch = unsafe { *self.epochs.get_unchecked(index) }; - id::Valid(Id::zip(index as u32, epoch, A::VARIANT)) + let resource = unsafe { self.resources.get_unchecked(index) }; + resource.as_ref().unwrap().clone() }) } + /// Returns an iterator over the resources owned by `self`. + pub(super) fn drain_resources(&mut self) -> Vec> { + if !self.owned.is_empty() { + self.tracker_assert_in_bounds(self.owned.len() - 1) + }; + let mut resources = Vec::new(); + iterate_bitvec_indices(&self.owned).for_each(|index| { + let resource = unsafe { self.resources.get_unchecked(index) }; + resources.push(resource.as_ref().unwrap().clone()); + }); + self.owned.clear(); + self.resources.clear(); + resources + } + /// Returns an iterator over the indices of all resources owned by `self`. pub(super) fn owned_indices(&self) -> impl Iterator + '_ { if !self.owned.is_empty() { @@ -161,8 +169,7 @@ impl ResourceMetadata { /// Remove the resource with the given index from the set. pub(super) unsafe fn remove(&mut self, index: usize) { unsafe { - *self.ref_counts.get_unchecked_mut(index) = None; - *self.epochs.get_unchecked_mut(index) = u32::MAX; + *self.resources.get_unchecked_mut(index) = None; } self.owned.set(index, false); } @@ -172,44 +179,31 @@ impl ResourceMetadata { /// /// This is used to abstract over the various places /// trackers can get new resource metadata from. -pub(super) enum ResourceMetadataProvider<'a, A: HalApi> { +pub(super) enum ResourceMetadataProvider<'a, A: HalApi, I: TypedId, T: Resource> { /// Comes directly from explicit values. - Direct { - epoch: Epoch, - ref_count: Cow<'a, RefCount>, - }, + Direct { resource: Cow<'a, Arc> }, /// Comes from another metadata tracker. - Indirect { metadata: &'a ResourceMetadata }, - /// The epoch is given directly, but the life count comes from the resource itself. - Resource { epoch: Epoch }, + Indirect { + metadata: &'a ResourceMetadata, + }, } -impl ResourceMetadataProvider<'_, A> { +impl> ResourceMetadataProvider<'_, A, I, T> { /// Get the epoch and an owned refcount from this. /// /// # Safety /// /// - The index must be in bounds of the metadata tracker if this uses an indirect source. - /// - life_guard must be Some if this uses a Resource source. + /// - info must be Some if this uses a Resource source. 
#[inline(always)] - pub(super) unsafe fn get_own( - self, - life_guard: Option<&LifeGuard>, - index: usize, - ) -> (Epoch, RefCount) { + pub(super) unsafe fn get_own(self, index: usize) -> Arc { match self { - ResourceMetadataProvider::Direct { epoch, ref_count } => { - (epoch, ref_count.into_owned()) - } + ResourceMetadataProvider::Direct { resource } => resource.into_owned(), ResourceMetadataProvider::Indirect { metadata } => { metadata.tracker_assert_in_bounds(index); - (unsafe { *metadata.epochs.get_unchecked(index) }, { - let ref_count = unsafe { metadata.ref_counts.get_unchecked(index) }; - unsafe { ref_count.clone().unwrap_unchecked() } - }) - } - ResourceMetadataProvider::Resource { epoch } => { - strict_assert!(life_guard.is_some()); - (epoch, unsafe { life_guard.unwrap_unchecked() }.add_ref()) + { + let resource = unsafe { metadata.resources.get_unchecked(index) }; + unsafe { resource.clone().unwrap_unchecked() } + } } } } @@ -220,14 +214,7 @@ impl ResourceMetadataProvider<'_, A> { /// - The index must be in bounds of the metadata tracker if this uses an indirect source. #[inline(always)] pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch { - match self { - ResourceMetadataProvider::Direct { epoch, .. } - | ResourceMetadataProvider::Resource { epoch, .. } => epoch, - ResourceMetadataProvider::Indirect { metadata } => { - metadata.tracker_assert_in_bounds(index); - unsafe { *metadata.epochs.get_unchecked(index) } - } - } + unsafe { self.get_own(index).as_info().id().unzip().1 } } } diff --git a/third_party/rust/wgpu-core/src/track/mod.rs b/third_party/rust/wgpu-core/src/track/mod.rs index 69e29fc59ff53..bd8d3a5580e0c 100644 --- a/third_party/rust/wgpu-core/src/track/mod.rs +++ b/third_party/rust/wgpu-core/src/track/mod.rs @@ -105,9 +105,11 @@ use crate::{ binding_model, command, conv, hal_api::HalApi, id::{self, TypedId}, - pipeline, resource, storage, + pipeline, resource, + storage::Storage, }; +use parking_lot::RwLock; use std::{fmt, ops}; use thiserror::Error; @@ -129,9 +131,11 @@ pub(crate) struct PendingTransition { pub usage: ops::Range, } +pub(crate) type PendingTransitionList = Vec>; + impl PendingTransition { /// Produce the hal barrier corresponding to the transition. - pub fn into_hal<'a, A: hal::Api>( + pub fn into_hal<'a, A: HalApi>( self, buf: &'a resource::Buffer, ) -> hal::BufferBarrier<'a, A> { @@ -145,11 +149,11 @@ impl PendingTransition { impl PendingTransition { /// Produce the hal barrier corresponding to the transition. - pub fn into_hal<'a, A: hal::Api>( + pub fn into_hal<'a, A: HalApi>( self, - tex: &'a resource::Texture, + tex: &'a resource::TextureInner, ) -> hal::TextureBarrier<'a, A> { - let texture = tex.inner.as_raw().expect("Texture is destroyed"); + let texture = tex.as_raw().expect("Texture is destroyed"); // These showing up in a barrier is always a bug strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN); @@ -315,11 +319,12 @@ impl fmt::Display for InvalidUse { /// /// All bind group states are sorted by their ID so that when adding to a tracker, /// they are added in the most efficient order possible (assending order). 
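`get_ref_count_unchecked` above now reads `Arc::strong_count` on the stored clone instead of consulting the removed `RefCount` type. A tiny demonstration of the `std::sync::Arc` semantics the trackers now lean on:

use std::sync::Arc;

fn main() {
    let resource = Arc::new("buffer");
    assert_eq!(Arc::strong_count(&resource), 1);

    let tracked = resource.clone(); // e.g. the clone a tracker stores
    assert_eq!(Arc::strong_count(&resource), 2);

    drop(tracked); // the tracker releases its clone
    assert_eq!(Arc::strong_count(&resource), 1);
}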
+#[derive(Debug)] pub(crate) struct BindGroupStates { pub buffers: BufferBindGroupState, pub textures: TextureBindGroupState, - pub views: StatelessBindGroupSate, id::TextureViewId>, - pub samplers: StatelessBindGroupSate, id::SamplerId>, + pub views: StatelessBindGroupSate>, + pub samplers: StatelessBindGroupSate>, } impl BindGroupStates { @@ -347,37 +352,42 @@ impl BindGroupStates { /// This is a render bundle specific usage scope. It includes stateless resources /// that are not normally included in a usage scope, but are used by render bundles /// and need to be owned by the render bundles. +#[derive(Debug)] pub(crate) struct RenderBundleScope { - pub buffers: BufferUsageScope, - pub textures: TextureUsageScope, + pub buffers: RwLock>, + pub textures: RwLock>, // Don't need to track views and samplers, they are never used directly, only by bind groups. - pub bind_groups: StatelessTracker, id::BindGroupId>, - pub render_pipelines: StatelessTracker, id::RenderPipelineId>, - pub query_sets: StatelessTracker, id::QuerySetId>, + pub bind_groups: RwLock>>, + pub render_pipelines: + RwLock>>, + pub query_sets: RwLock>>, } impl RenderBundleScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. pub fn new( - buffers: &storage::Storage, id::BufferId>, - textures: &storage::Storage, id::TextureId>, - bind_groups: &storage::Storage, id::BindGroupId>, - render_pipelines: &storage::Storage, id::RenderPipelineId>, - query_sets: &storage::Storage, id::QuerySetId>, + buffers: &Storage, id::BufferId>, + textures: &Storage, id::TextureId>, + bind_groups: &Storage, id::BindGroupId>, + render_pipelines: &Storage, id::RenderPipelineId>, + query_sets: &Storage, id::QuerySetId>, ) -> Self { - let mut value = Self { - buffers: BufferUsageScope::new(), - textures: TextureUsageScope::new(), - bind_groups: StatelessTracker::new(), - render_pipelines: StatelessTracker::new(), - query_sets: StatelessTracker::new(), + let value = Self { + buffers: RwLock::new(BufferUsageScope::new()), + textures: RwLock::new(TextureUsageScope::new()), + bind_groups: RwLock::new(StatelessTracker::new()), + render_pipelines: RwLock::new(StatelessTracker::new()), + query_sets: RwLock::new(StatelessTracker::new()), }; - value.buffers.set_size(buffers.len()); - value.textures.set_size(textures.len()); - value.bind_groups.set_size(bind_groups.len()); - value.render_pipelines.set_size(render_pipelines.len()); - value.query_sets.set_size(query_sets.len()); + value.buffers.write().set_size(buffers.len()); + value.textures.write().set_size(textures.len()); + value.bind_groups.write().set_size(bind_groups.len()); + value + .render_pipelines + .write() + .set_size(render_pipelines.len()); + value.query_sets.write().set_size(query_sets.len()); value } @@ -393,13 +403,13 @@ impl RenderBundleScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &storage::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { - unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? }; + unsafe { self.buffers.write().merge_bind_group(&bind_group.buffers)? }; unsafe { self.textures - .merge_bind_group(textures, &bind_group.textures)? + .write() + .merge_bind_group(&bind_group.textures)? }; Ok(()) @@ -417,8 +427,8 @@ pub(crate) struct UsageScope { impl UsageScope { /// Create the render bundle scope and pull the maximum IDs from the hubs. 
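`RenderBundleScope` above wraps each of its trackers in a `parking_lot::RwLock` so the scope can be grown and merged through shared references. The same shape with `std::sync::RwLock` (whose guards, unlike parking_lot's, come wrapped in a `Result`); `UsageScope` here is a stub, not the real tracker:

use std::sync::RwLock;

#[derive(Default)]
struct UsageScope {
    size: usize, // the real scopes hold per-resource state tables
}

impl UsageScope {
    fn set_size(&mut self, size: usize) {
        self.size = size;
    }
}

#[derive(Default)]
struct RenderBundleScope {
    buffers: RwLock<UsageScope>,
    textures: RwLock<UsageScope>,
}

fn main() {
    let scope = RenderBundleScope::default();

    // Writers grow the scopes through interior mutability...
    scope.buffers.write().unwrap().set_size(16);
    scope.textures.write().unwrap().set_size(4);

    // ...while later consumers, e.g. a merge into a pass scope, only read.
    assert_eq!(scope.buffers.read().unwrap().size, 16);
    assert_eq!(scope.textures.read().unwrap().size, 4);
}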
pub fn new( - buffers: &storage::Storage, id::BufferId>, - textures: &storage::Storage, id::TextureId>, + buffers: &Storage, id::BufferId>, + textures: &Storage, id::TextureId>, ) -> Self { let mut value = Self { buffers: BufferUsageScope::new(), @@ -442,13 +452,11 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_bind_group( &mut self, - textures: &storage::Storage, id::TextureId>, bind_group: &BindGroupStates, ) -> Result<(), UsageConflict> { unsafe { self.buffers.merge_bind_group(&bind_group.buffers)?; - self.textures - .merge_bind_group(textures, &bind_group.textures)?; + self.textures.merge_bind_group(&bind_group.textures)?; } Ok(()) @@ -465,28 +473,36 @@ impl UsageScope { /// length of the storage given at the call to `new`. pub unsafe fn merge_render_bundle( &mut self, - textures: &storage::Storage, id::TextureId>, render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { - self.buffers.merge_usage_scope(&render_bundle.buffers)?; + self.buffers + .merge_usage_scope(&*render_bundle.buffers.read())?; self.textures - .merge_usage_scope(textures, &render_bundle.textures)?; + .merge_usage_scope(&*render_bundle.textures.read())?; Ok(()) } } +pub(crate) trait ResourceTracker +where + Id: TypedId, + R: resource::Resource, +{ + fn remove_abandoned(&mut self, id: Id, external_count: usize) -> bool; +} + /// A full double sided tracker used by CommandBuffers and the Device. pub(crate) struct Tracker { pub buffers: BufferTracker, pub textures: TextureTracker, - pub views: StatelessTracker, id::TextureViewId>, - pub samplers: StatelessTracker, id::SamplerId>, - pub bind_groups: StatelessTracker, id::BindGroupId>, - pub compute_pipelines: StatelessTracker, id::ComputePipelineId>, - pub render_pipelines: StatelessTracker, id::RenderPipelineId>, - pub bundles: StatelessTracker, id::RenderBundleId>, - pub query_sets: StatelessTracker, id::QuerySetId>, + pub views: StatelessTracker>, + pub samplers: StatelessTracker>, + pub bind_groups: StatelessTracker>, + pub compute_pipelines: StatelessTracker>, + pub render_pipelines: StatelessTracker>, + pub bundles: StatelessTracker>, + pub query_sets: StatelessTracker>, } impl Tracker { @@ -507,19 +523,15 @@ impl Tracker { /// Pull the maximum IDs from the hubs. 
pub fn set_size( &mut self, - buffers: Option<&storage::Storage, id::BufferId>>, - textures: Option<&storage::Storage, id::TextureId>>, - views: Option<&storage::Storage, id::TextureViewId>>, - samplers: Option<&storage::Storage, id::SamplerId>>, - bind_groups: Option<&storage::Storage, id::BindGroupId>>, - compute_pipelines: Option< - &storage::Storage, id::ComputePipelineId>, - >, - render_pipelines: Option< - &storage::Storage, id::RenderPipelineId>, - >, - bundles: Option<&storage::Storage, id::RenderBundleId>>, - query_sets: Option<&storage::Storage, id::QuerySetId>>, + buffers: Option<&Storage, id::BufferId>>, + textures: Option<&Storage, id::TextureId>>, + views: Option<&Storage, id::TextureViewId>>, + samplers: Option<&Storage, id::SamplerId>>, + bind_groups: Option<&Storage, id::BindGroupId>>, + compute_pipelines: Option<&Storage, id::ComputePipelineId>>, + render_pipelines: Option<&Storage, id::RenderPipelineId>>, + bundles: Option<&Storage, id::RenderBundleId>>, + query_sets: Option<&Storage, id::QuerySetId>>, ) { if let Some(buffers) = buffers { self.buffers.set_size(buffers.len()); @@ -574,22 +586,18 @@ impl Tracker { /// value given to `set_size` pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - textures: &storage::Storage, id::TextureId>, scope: &mut UsageScope, bind_group: &BindGroupStates, ) { unsafe { self.buffers.set_and_remove_from_usage_scope_sparse( &mut scope.buffers, - bind_group.buffers.used(), + bind_group.buffers.used_ids(), ) }; unsafe { - self.textures.set_and_remove_from_usage_scope_sparse( - textures, - &mut scope.textures, - &bind_group.textures, - ) + self.textures + .set_and_remove_from_usage_scope_sparse(&mut scope.textures, &bind_group.textures) }; } @@ -605,10 +613,11 @@ impl Tracker { render_bundle: &RenderBundleScope, ) -> Result<(), UsageConflict> { self.bind_groups - .add_from_tracker(&render_bundle.bind_groups); + .add_from_tracker(&*render_bundle.bind_groups.read()); self.render_pipelines - .add_from_tracker(&render_bundle.render_pipelines); - self.query_sets.add_from_tracker(&render_bundle.query_sets); + .add_from_tracker(&*render_bundle.render_pipelines.read()); + self.query_sets + .add_from_tracker(&*render_bundle.query_sets.read()); Ok(()) } diff --git a/third_party/rust/wgpu-core/src/track/stateless.rs b/third_party/rust/wgpu-core/src/track/stateless.rs index bb4206b357b04..e88c0c0c61a77 100644 --- a/third_party/rust/wgpu-core/src/track/stateless.rs +++ b/third_party/rust/wgpu-core/src/track/stateless.rs @@ -4,29 +4,26 @@ * distinction between a usage scope and a full tracker. !*/ -use std::marker::PhantomData; +use std::{marker::PhantomData, sync::Arc}; + +use parking_lot::Mutex; use crate::{ - hal_api::HalApi, - id::{TypedId, Valid}, - resource, storage, - track::ResourceMetadata, - RefCount, + hal_api::HalApi, id::TypedId, resource::Resource, storage::Storage, track::ResourceMetadata, }; -/// Stores all the resources that a bind group stores. -pub(crate) struct StatelessBindGroupSate { - resources: Vec<(Valid, RefCount)>, +use super::ResourceTracker; - _phantom: PhantomData, +/// Stores all the resources that a bind group stores. 
+#[derive(Debug)] +pub(crate) struct StatelessBindGroupSate> { + resources: Mutex)>>, } -impl StatelessBindGroupSate { +impl> StatelessBindGroupSate { pub fn new() -> Self { Self { - resources: Vec::new(), - - _phantom: PhantomData, + resources: Mutex::new(Vec::new()), } } @@ -34,43 +31,97 @@ impl StatelessBindGroupSate { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. - pub(crate) fn optimize(&mut self) { - self.resources - .sort_unstable_by_key(|&(id, _)| id.0.unzip().0); + pub(crate) fn optimize(&self) { + let mut resources = self.resources.lock(); + resources.sort_unstable_by_key(|&(id, _)| id.unzip().0); + } + + /// Returns a list of all resources tracked. May contain duplicates. + pub fn used_resources(&self) -> impl Iterator> + '_ { + let resources = self.resources.lock(); + resources + .iter() + .map(|&(_, ref resource)| resource.clone()) + .collect::>() + .into_iter() } /// Returns a list of all resources tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { - self.resources.iter().map(|&(id, _)| id) + pub fn drain_resources(&self) -> impl Iterator> + '_ { + let mut resources = self.resources.lock(); + resources + .drain(..) + .map(|(_, r)| r) + .collect::>() + .into_iter() } /// Adds the given resource. - pub fn add_single<'a>( - &mut self, - storage: &'a storage::Storage, - id: Id, - ) -> Option<&'a T> { + pub fn add_single<'a>(&self, storage: &'a Storage, id: Id) -> Option<&'a T> { let resource = storage.get(id).ok()?; - self.resources - .push((Valid(id), resource.life_guard().add_ref())); + let mut resources = self.resources.lock(); + resources.push((id, resource.clone())); Some(resource) } } /// Stores all resource state within a command buffer or device. -pub(crate) struct StatelessTracker { - metadata: ResourceMetadata, +#[derive(Debug)] +pub(crate) struct StatelessTracker> { + metadata: ResourceMetadata, + _phantom: PhantomData, +} + +impl> ResourceTracker + for StatelessTracker +{ + /// Removes the given resource from the tracker iff we have the last reference to the + /// resource and the epoch matches. + /// + /// Returns true if the resource was removed. + /// + /// If the ID is higher than the length of internal vectors, + /// false will be returned. + fn remove_abandoned(&mut self, id: Id, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.metadata.remove(index); + log::info!("{} {:?} is not tracked anymore", T::TYPE, id,); + return true; + } else { + log::info!( + "{} {:?} is still referenced from {}", + T::TYPE, + id, + existing_ref_count + ); + } + } + } - _phantom: PhantomData<(T, Id)>, + false + } } -impl StatelessTracker { +impl> StatelessTracker { pub fn new() -> Self { Self { metadata: ResourceMetadata::new(), - _phantom: PhantomData, } } @@ -95,8 +146,14 @@ impl StatelessTracker { } /// Returns a list of all resources tracked. 
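The `ResourceTracker` trait and the `remove_abandoned` implementation above encode the new lifetime rule: with the tracker's own clone plus the `external_count` clones the caller already knows about, a strong count at or below `1 + external_count` means nobody else holds the resource. A self-contained sketch of that threshold test; the `Sampler` type and the plain index used as an id are stand-ins:

use std::sync::Arc;

struct Sampler; // stand-in resource

trait ResourceTracker {
    fn remove_abandoned(&mut self, id: usize, external_count: usize) -> bool;
}

struct SamplerTracker {
    resources: Vec<Option<Arc<Sampler>>>,
}

impl ResourceTracker for SamplerTracker {
    fn remove_abandoned(&mut self, id: usize, external_count: usize) -> bool {
        let Some(slot) = self.resources.get_mut(id) else { return false };
        // The tracker's own clone plus the caller's known clones account for
        // `1 + external_count` strong references; at or below that threshold
        // the resource is abandoned and the tracker can drop its clone.
        let abandoned =
            matches!(slot, Some(res) if Arc::strong_count(res) <= 1 + external_count);
        if abandoned {
            *slot = None;
        }
        abandoned
    }
}

fn main() {
    let sampler = Arc::new(Sampler);
    let mut tracker = SamplerTracker { resources: vec![Some(sampler.clone())] };
    assert!(!tracker.remove_abandoned(0, 0)); // `sampler` still held outside
    drop(sampler);
    assert!(tracker.remove_abandoned(0, 0)); // only the tracker's clone was left
}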
- pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator> + '_ { + self.metadata.owned_resources() + } + + /// Returns a list of all resources tracked. + pub fn drain_resources(&mut self) -> impl Iterator> + '_ { + let resources = self.metadata.drain_resources(); + resources.into_iter() } /// Inserts a single resource into the resource tracker. @@ -105,8 +162,8 @@ impl StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: Valid, ref_count: RefCount) { - let (index32, epoch, _) = id.0.unzip(); + pub fn insert_single(&mut self, id: Id, resource: Arc) { + let (index32, _epoch, _) = id.unzip(); let index = index32 as usize; self.allow_index(index); @@ -114,7 +171,7 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata.insert(index, epoch, ref_count); + self.metadata.insert(index, resource); } } @@ -122,14 +179,10 @@ impl StatelessTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn add_single<'a>( - &mut self, - storage: &'a storage::Storage, - id: Id, - ) -> Option<&'a T> { - let item = storage.get(id).ok()?; - - let (index32, epoch, _) = id.unzip(); + pub fn add_single<'a>(&mut self, storage: &'a Storage, id: Id) -> Option<&'a Arc> { + let resource = storage.get(id).ok()?; + + let (index32, _epoch, _) = id.unzip(); let index = index32 as usize; self.allow_index(index); @@ -137,11 +190,10 @@ impl StatelessTracker { self.tracker_assert_in_bounds(index); unsafe { - self.metadata - .insert(index, epoch, item.life_guard().add_ref()); + self.metadata.insert(index, resource.clone()); } - Some(item) + Some(resource) } /// Adds the given resources from the given tracker. @@ -161,43 +213,24 @@ impl StatelessTracker { let previously_owned = self.metadata.contains_unchecked(index); if !previously_owned { - let epoch = other.metadata.get_epoch_unchecked(index); - let other_ref_count = other.metadata.get_ref_count_unchecked(index); - self.metadata.insert(index, epoch, other_ref_count.clone()); + let other_resource = other.metadata.get_resource_unchecked(index); + self.metadata.insert(index, other_resource.clone()); } } } } - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. - /// - /// Returns true if the resource was removed. - /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. 
- pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; - + pub fn get(&self, id: Id) -> Option<&Arc> { + let index = id.unzip().0 as usize; if index > self.metadata.size() { - return false; + return None; } - self.tracker_assert_in_bounds(index); - unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count.load() == 1 { - self.metadata.remove(index); - return true; - } + return Some(self.metadata.get_resource_unchecked(index)); } } - - false + None } } diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs index 8b0926adf1a6e..bda2b9f850743 100644 --- a/third_party/rust/wgpu-core/src/track/texture.rs +++ b/third_party/rust/wgpu-core/src/track/texture.rs @@ -19,25 +19,25 @@ * will treat the contents as junk. !*/ -use super::{range::RangedStates, PendingTransition}; +use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker}; use crate::{ hal_api::HalApi, - id::{TextureId, TypedId, Valid}, - resource::Texture, - storage, + id::{TextureId, TypedId}, + resource::{Resource, Texture, TextureInner}, track::{ invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider, ResourceUses, UsageConflict, }, - LifeGuard, RefCount, }; use hal::TextureUses; use arrayvec::ArrayVec; use naga::FastHashMap; + +use parking_lot::{Mutex, RwLockReadGuard}; use wgt::{strict_assert, strict_assert_eq}; -use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, vec::Drain}; +use std::{borrow::Cow, iter, marker::PhantomData, ops::Range, sync::Arc, vec::Drain}; /// Specifies a particular set of subresources in a texture. #[derive(Clone, Debug, PartialEq, Eq)] @@ -148,23 +148,22 @@ impl ComplexTextureState { } } +#[derive(Debug)] +struct TextureBindGroupStateData { + selector: Option, + texture: Arc>, + usage: TextureUses, +} + /// Stores all the textures that a bind group stores. +#[derive(Debug)] pub(crate) struct TextureBindGroupState { - textures: Vec<( - Valid, - Option, - RefCount, - TextureUses, - )>, - - _phantom: PhantomData, + textures: Mutex>>, } impl TextureBindGroupState { pub fn new() -> Self { Self { - textures: Vec::new(), - - _phantom: PhantomData, + textures: Mutex::new(Vec::new()), } } @@ -172,30 +171,35 @@ impl TextureBindGroupState { /// /// When this list of states is merged into a tracker, the memory /// accesses will be in a constant assending order. - pub(crate) fn optimize(&mut self) { - self.textures - .sort_unstable_by_key(|&(id, _, _, _)| id.0.unzip().0); + pub(crate) fn optimize(&self) { + let mut textures = self.textures.lock(); + textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0); } - /// Returns a list of all buffers tracked. May contain duplicates. - pub fn used(&self) -> impl Iterator> + '_ { - self.textures.iter().map(|&(id, _, _, _)| id) + /// Returns a list of all textures tracked. May contain duplicates. + pub fn drain_resources(&self) -> impl Iterator>> + '_ { + let mut textures = self.textures.lock(); + textures + .drain(..) + .map(|v| v.texture) + .collect::>() + .into_iter() } /// Adds the given resource with the given state. 
pub fn add_single<'a>( - &mut self, - storage: &'a storage::Storage, TextureId>, - id: TextureId, - ref_count: RefCount, + &self, + texture: &'a Arc>, selector: Option, state: TextureUses, - ) -> Option<&'a Texture> { - let value = storage.get(id).ok()?; - - self.textures.push((Valid(id), selector, ref_count, state)); - - Some(value) + ) -> Option<&'a Arc>> { + let mut textures = self.textures.lock(); + textures.push(TextureBindGroupStateData { + selector, + texture: texture.clone(), + usage: state, + }); + Some(texture) } } @@ -203,7 +207,7 @@ impl TextureBindGroupState { #[derive(Debug)] pub(crate) struct TextureStateSet { simple: Vec, - complex: FastHashMap, + complex: FastHashMap, } impl TextureStateSet { fn new() -> Self { @@ -213,6 +217,11 @@ impl TextureStateSet { } } + fn clear(&mut self) { + self.simple.clear(); + self.complex.clear(); + } + fn set_size(&mut self, size: usize) { self.simple.resize(size, TextureUses::UNINITIALIZED); } @@ -222,8 +231,7 @@ impl TextureStateSet { #[derive(Debug)] pub(crate) struct TextureUsageScope { set: TextureStateSet, - - metadata: ResourceMetadata, + metadata: ResourceMetadata>, } impl TextureUsageScope { @@ -243,7 +251,7 @@ impl TextureUsageScope { strict_assert!(if self.metadata.contains(index) && self.set.simple[index] == TextureUses::COMPLEX { - self.set.complex.contains_key(&(index as u32)) + self.set.complex.contains_key(&index) } else { true }); @@ -258,9 +266,11 @@ impl TextureUsageScope { self.metadata.set_size(size); } - /// Returns a list of all textures tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + /// Drains all textures tracked. + pub(crate) fn drain_resources(&mut self) -> impl Iterator>> + '_ { + let resources = self.metadata.drain_resources(); + self.set.clear(); + resources.into_iter() } /// Returns true if the tracker owns no resources. @@ -277,29 +287,23 @@ impl TextureUsageScope { /// /// If the given tracker uses IDs higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn merge_usage_scope( - &mut self, - storage: &storage::Storage, TextureId>, - scope: &Self, - ) -> Result<(), UsageConflict> { + pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> { let incoming_size = scope.set.simple.len(); if incoming_size > self.set.simple.len() { self.set_size(incoming_size); } for index in scope.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = + unsafe { &scope.metadata.get_resource_unchecked(index).full_range }; unsafe { insert_or_merge( - texture_data, + texture_selector, &mut self.set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, ResourceMetadataProvider::Indirect { @@ -326,11 +330,11 @@ impl TextureUsageScope { /// method is called. pub unsafe fn merge_bind_group( &mut self, - storage: &storage::Storage, TextureId>, bind_group: &TextureBindGroupState, ) -> Result<(), UsageConflict> { - for &(id, ref selector, ref ref_count, state) in &bind_group.textures { - unsafe { self.merge_single(storage, id, selector.clone(), ref_count, state)? }; + let textures = bind_group.textures.lock(); + for t in &*textures { + unsafe { self.merge_single(&t.texture, t.selector.clone(), t.usage)? }; } Ok(()) @@ -351,29 +355,24 @@ impl TextureUsageScope { /// method is called. 
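`TextureBindGroupState` above moves its entries behind a `Mutex` so `add_single` and `optimize` can take `&self`, and sorts by the id embedded in each texture so later merges walk memory in ascending order. A reduced sketch with `std::sync::Mutex` in place of parking_lot's, and a bare index in place of the real id:

use std::sync::{Arc, Mutex};

struct Texture {
    index: usize, // stand-in for the id stored in the texture's Info
}

struct Entry {
    texture: Arc<Texture>,
    usage: u32,
}

struct TextureBindGroupState {
    textures: Mutex<Vec<Entry>>,
}

impl TextureBindGroupState {
    fn new() -> Self {
        Self { textures: Mutex::new(Vec::new()) }
    }

    /// Record a use of `texture`; the Arc clone keeps it alive with the bind group.
    fn add_single(&self, texture: &Arc<Texture>, usage: u32) {
        self.textures.lock().unwrap().push(Entry { texture: texture.clone(), usage });
    }

    /// Sort by tracker index so merging into a tracker walks memory in
    /// ascending order.
    fn optimize(&self) {
        self.textures.lock().unwrap().sort_unstable_by_key(|e| e.texture.index);
    }
}

fn main() {
    let state = TextureBindGroupState::new();
    state.add_single(&Arc::new(Texture { index: 5 }), 1);
    state.add_single(&Arc::new(Texture { index: 2 }), 2);
    state.optimize();

    let entries = state.textures.lock().unwrap();
    assert_eq!((entries[0].texture.index, entries[0].usage), (2, 2));
}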
pub unsafe fn merge_single( &mut self, - storage: &storage::Storage, TextureId>, - id: Valid, + texture: &Arc>, selector: Option, - ref_count: &RefCount, new_state: TextureUses, ) -> Result<(), UsageConflict> { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + let index = texture.as_info().id().unzip().0 as usize; self.tracker_assert_in_bounds(index); - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = &texture.full_range; unsafe { insert_or_merge( - texture_data, + texture_selector, &mut self.set, &mut self.metadata, - index32, index, TextureStateProvider::from_option(selector, new_state), ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Borrowed(ref_count), + resource: Cow::Borrowed(texture), }, )? }; @@ -387,12 +386,56 @@ pub(crate) struct TextureTracker { start_set: TextureStateSet, end_set: TextureStateSet, - metadata: ResourceMetadata, + metadata: ResourceMetadata>, temp: Vec>, _phantom: PhantomData, } + +impl ResourceTracker> for TextureTracker { + /// Removes the given resource from the tracker iff we have the last reference to the + /// resource and the epoch matches. + /// + /// Returns true if the resource was removed. + /// + /// If the ID is higher than the length of internal vectors, + /// false will be returned. + fn remove_abandoned(&mut self, id: TextureId, external_count: usize) -> bool { + let index = id.unzip().0 as usize; + + if index > self.metadata.size() { + return false; + } + + self.tracker_assert_in_bounds(index); + + unsafe { + if self.metadata.contains_unchecked(index) { + let existing_ref_count = self.metadata.get_ref_count_unchecked(index); + //2 ref count if only in Device Tracker and suspected resource itself and already released from user + //so not appearing in Registry + let min_ref_count = 1 + external_count; + if existing_ref_count <= min_ref_count { + self.start_set.complex.remove(&index); + self.end_set.complex.remove(&index); + self.metadata.remove(index); + log::info!("Texture {:?} is not tracked anymore", id,); + return true; + } else { + log::info!( + "Texture {:?} is still referenced from {}", + id, + existing_ref_count + ); + } + } + } + + false + } +} + impl TextureTracker { pub fn new() -> Self { Self { @@ -416,14 +459,14 @@ impl TextureTracker { strict_assert!(if self.metadata.contains(index) && self.start_set.simple[index] == TextureUses::COMPLEX { - self.start_set.complex.contains_key(&(index as u32)) + self.start_set.complex.contains_key(&index) } else { true }); strict_assert!(if self.metadata.contains(index) && self.end_set.simple[index] == TextureUses::COMPLEX { - self.end_set.complex.contains_key(&(index as u32)) + self.end_set.complex.contains_key(&index) } else { true }); @@ -448,30 +491,28 @@ impl TextureTracker { } /// Returns a list of all textures tracked. - pub fn used(&self) -> impl Iterator> + '_ { - self.metadata.owned_ids() + pub fn used_resources(&self) -> impl Iterator>> + '_ { + self.metadata.owned_resources() } - /// Drains all currently pending transitions. - pub fn drain(&mut self) -> Drain> { - self.temp.drain(..) - } - - /// Get the refcount of the given resource. - /// - /// # Safety - /// - /// [`Self::set_size`] must be called with the maximum possible Buffer ID before this - /// method is called. - /// - /// The resource must be tracked by this tracker. 
- pub unsafe fn get_ref_count(&self, id: Valid) -> &RefCount { - let (index32, _, _) = id.0.unzip(); - let index = index32 as usize; - - self.tracker_assert_in_bounds(index); - - unsafe { self.metadata.get_ref_count_unchecked(index) } + /// Drain all currently pending transitions. + pub fn drain_transitions<'a>( + &'a mut self, + ) -> ( + PendingTransitionList, + Vec>>>, + ) { + let mut textures = Vec::new(); + let transitions = self + .temp + .drain(..) + .map(|pending| { + let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; + textures.push(tex.inner()); + pending + }) + .collect(); + (transitions, textures) } /// Inserts a single texture and a state into the resource tracker. @@ -480,9 +521,8 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn insert_single(&mut self, id: TextureId, ref_count: RefCount, usage: TextureUses) { - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + pub fn insert_single(&mut self, id: TextureId, resource: Arc>, usage: TextureUses) { + let index = id.unzip().0 as usize; self.allow_index(index); @@ -500,13 +540,11 @@ impl TextureTracker { Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::KnownSingle { state: usage }, None, ResourceMetadataProvider::Direct { - epoch, - ref_count: Cow::Owned(ref_count), + resource: Cow::Owned(resource), }, ) }; @@ -521,13 +559,11 @@ impl TextureTracker { /// the vectors will be extended. A call to set_size is not needed. pub fn set_single( &mut self, - texture: &Texture, - id: TextureId, + texture: &Arc>, selector: TextureSelector, new_state: TextureUses, ) -> Option>> { - let (index32, epoch, _) = id.unzip(); - let index = index32 as usize; + let index = texture.as_info().id().unzip().0 as usize; self.allow_index(index); @@ -535,18 +571,19 @@ impl TextureTracker { unsafe { insert_or_barrier_update( - (&texture.life_guard, &texture.full_range), + &texture.full_range, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::Selector { selector, state: new_state, }, None, - ResourceMetadataProvider::Resource { epoch }, + ResourceMetadataProvider::Direct { + resource: Cow::Owned(texture.clone()), + }, &mut self.temp, ) } @@ -558,32 +595,26 @@ impl TextureTracker { /// /// If a transition is needed to get the texture into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. 
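`drain_transitions` above replaces the old `drain` call: because the metadata now owns `Arc`s to the textures, the tracker can hand back each pending transition together with the texture it refers to, and callers no longer need a storage lookup to build hal barriers. A simplified sketch of that pairing, with stand-in types and a single state table:

use std::ops::Range;
use std::sync::Arc;

struct Texture; // stand-in resource

struct PendingTransition {
    id: usize, // index of the tracker slot this transition belongs to
    usage: Range<u32>,
}

struct TextureTracker {
    temp: Vec<PendingTransition>,
    resources: Vec<Option<Arc<Texture>>>,
}

impl TextureTracker {
    /// Hand back the queued transitions paired with clones of the textures
    /// they refer to, so the caller needs no storage lookup.
    fn drain_transitions(&mut self) -> (Vec<PendingTransition>, Vec<Arc<Texture>>) {
        let resources = &self.resources;
        let mut textures = Vec::new();
        let transitions: Vec<_> = self
            .temp
            .drain(..)
            .map(|pending| {
                textures.push(resources[pending.id].as_ref().unwrap().clone());
                pending
            })
            .collect();
        (transitions, textures)
    }
}

fn main() {
    let mut tracker = TextureTracker {
        temp: vec![PendingTransition { id: 0, usage: 1..4 }],
        resources: vec![Some(Arc::new(Texture))],
    };

    let (transitions, textures) = tracker.drain_transitions();
    assert_eq!(transitions.len(), 1);
    assert_eq!(transitions[0].usage, 1..4);
    assert_eq!(textures.len(), 1);
}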
- pub fn set_from_tracker( - &mut self, - storage: &storage::Storage, TextureId>, - tracker: &Self, - ) { + pub fn set_from_tracker(&mut self, tracker: &Self) { let incoming_size = tracker.start_set.simple.len(); if incoming_size > self.start_set.simple.len() { self.set_size(incoming_size); } for index in tracker.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); tracker.tracker_assert_in_bounds(index); unsafe { + let texture_selector = &tracker.metadata.get_resource_unchecked(index).full_range; insert_or_barrier_update( - texture_data_from_texture(storage, index32), + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &tracker.start_set, @@ -604,32 +635,26 @@ impl TextureTracker { /// /// If a transition is needed to get the textures into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// If the ID is higher than the length of internal vectors, /// the vectors will be extended. A call to set_size is not needed. - pub fn set_from_usage_scope( - &mut self, - storage: &storage::Storage, TextureId>, - scope: &TextureUsageScope, - ) { + pub fn set_from_usage_scope(&mut self, scope: &TextureUsageScope) { let incoming_size = scope.set.simple.len(); if incoming_size > self.start_set.simple.len() { self.set_size(incoming_size); } for index in scope.metadata.owned_indices() { - let index32 = index as u32; - self.tracker_assert_in_bounds(index); scope.tracker_assert_in_bounds(index); unsafe { + let texture_selector = &scope.metadata.get_resource_unchecked(index).full_range; insert_or_barrier_update( - texture_data_from_texture(storage, index32), + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, None, @@ -648,7 +673,7 @@ impl TextureTracker { /// /// If a transition is needed to get the textures into the needed state, /// those transitions are stored within the tracker. A subsequent - /// call to [`Self::drain`] is needed to get those transitions. + /// call to [`Self::drain_transitions`] is needed to get those transitions. /// /// This is a really funky method used by Compute Passes to generate /// barriers after a call to dispatch without needing to iterate @@ -662,7 +687,6 @@ impl TextureTracker { /// method is called. 
pub unsafe fn set_and_remove_from_usage_scope_sparse( &mut self, - storage: &storage::Storage, TextureId>, scope: &mut TextureUsageScope, bind_group_state: &TextureBindGroupState, ) { @@ -671,22 +695,21 @@ impl TextureTracker { self.set_size(incoming_size); } - for &(id, _, _, _) in bind_group_state.textures.iter() { - let (index32, _, _) = id.0.unzip(); - let index = index32 as usize; + let textures = bind_group_state.textures.lock(); + for t in textures.iter() { + let index = t.texture.as_info().id().unzip().0 as usize; scope.tracker_assert_in_bounds(index); if unsafe { !scope.metadata.contains_unchecked(index) } { continue; } - let texture_data = unsafe { texture_data_from_texture(storage, index32) }; + let texture_selector = &t.texture.full_range; unsafe { insert_or_barrier_update( - texture_data, + texture_selector, Some(&mut self.start_set), &mut self.end_set, &mut self.metadata, - index32, index, TextureStateProvider::TextureSet { set: &scope.set }, None, @@ -707,9 +730,8 @@ impl TextureTracker { /// /// If the ID is higher than the length of internal vectors, /// false will be returned. - pub fn remove(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; + pub fn remove(&mut self, id: TextureId) -> bool { + let index = id.unzip().0 as usize; if index > self.metadata.size() { return false; @@ -719,56 +741,15 @@ impl TextureTracker { unsafe { if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); - assert_eq!(existing_epoch, epoch); - - self.start_set.complex.remove(&index32); - self.end_set.complex.remove(&index32); - + self.start_set.complex.remove(&index); + self.end_set.complex.remove(&index); self.metadata.remove(index); - return true; } } false } - - /// Removes the given resource from the tracker iff we have the last reference to the - /// resource and the epoch matches. - /// - /// Returns true if the resource was removed. - /// - /// If the ID is higher than the length of internal vectors, - /// false will be returned. - pub fn remove_abandoned(&mut self, id: Valid) -> bool { - let (index32, epoch, _) = id.0.unzip(); - let index = index32 as usize; - - if index > self.metadata.size() { - return false; - } - - self.tracker_assert_in_bounds(index); - - unsafe { - if self.metadata.contains_unchecked(index) { - let existing_epoch = self.metadata.get_epoch_unchecked(index); - let existing_ref_count = self.metadata.get_ref_count_unchecked(index); - - if existing_epoch == epoch && existing_ref_count.load() == 1 { - self.start_set.complex.remove(&index32); - self.end_set.complex.remove(&index32); - - self.metadata.remove(index); - - return true; - } - } - } - - false - } } /// An iterator adapter that can store two different iterator types. @@ -828,7 +809,7 @@ impl<'a> TextureStateProvider<'a> { /// /// # Panics /// - /// Panics if texture_data is None and this uses a Selector source. + /// Panics if texture_selector is None and this uses a Selector source. /// /// # Safety /// @@ -836,8 +817,7 @@ impl<'a> TextureStateProvider<'a> { #[inline(always)] unsafe fn get_state( self, - texture_data: Option<(&LifeGuard, &TextureSelector)>, - index32: u32, + texture_selector: Option<&TextureSelector>, index: usize, ) -> SingleOrManyStates< TextureUses, @@ -850,7 +830,7 @@ impl<'a> TextureStateProvider<'a> { // and if it is we promote to a simple state. This allows upstream // code to specify selectors willy nilly, and all that are really // single states are promoted here. 
- if *texture_data.unwrap().1 == selector { + if *texture_selector.unwrap() == selector { SingleOrManyStates::Single(state) } else { SingleOrManyStates::Many(EitherIter::Left(iter::once((selector, state)))) @@ -860,7 +840,7 @@ impl<'a> TextureStateProvider<'a> { let new_state = *unsafe { set.simple.get_unchecked(index) }; if new_state == TextureUses::COMPLEX { - let new_complex = unsafe { set.complex.get(&index32).unwrap_unchecked() }; + let new_complex = unsafe { set.complex.get(&index).unwrap_unchecked() }; SingleOrManyStates::Many(EitherIter::Right( new_complex.to_selector_state_iter(), @@ -873,17 +853,6 @@ impl<'a> TextureStateProvider<'a> { } } -/// Helper function that gets what is needed from the texture storage -/// out of the texture storage. -#[inline(always)] -unsafe fn texture_data_from_texture( - storage: &storage::Storage, TextureId>, - index32: u32, -) -> (&LifeGuard, &TextureSelector) { - let texture = unsafe { storage.get_unchecked(index32) }; - (&texture.life_guard, &texture.full_range) -} - /// Does an insertion operation if the index isn't tracked /// in the current metadata, otherwise merges the given state /// with the current state. If the merging would cause @@ -895,24 +864,22 @@ unsafe fn texture_data_from_texture( /// to this function, either directly or via metadata or provider structs. #[inline(always)] unsafe fn insert_or_merge( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, - index32: u32, + resource_metadata: &mut ResourceMetadata>, index: usize, state_provider: TextureStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) -> Result<(), UsageConflict> { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; if !currently_owned { unsafe { insert( - Some(texture_data), + Some(texture_selector), None, current_state_set, resource_metadata, - index32, index, state_provider, None, @@ -924,9 +891,8 @@ unsafe fn insert_or_merge( unsafe { merge( - texture_data, + texture_selector, current_state_set, - index32, index, state_provider, metadata_provider, @@ -953,15 +919,14 @@ unsafe fn insert_or_merge( /// to this function, either directly or via metadata or provider structs. 
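The branch above shows why callers may pass selectors freely: if the selector happens to cover the texture's full range, the provider promotes it to a single simple state instead of a complex per-subresource map. The same decision in isolation, with a stand-in `Selector`:

use std::ops::Range;

#[derive(Clone, PartialEq, Eq)]
struct Selector {
    mips: Range<u32>,
    layers: Range<u32>,
}

enum SingleOrMany {
    Single(u32),
    Many(Vec<(Selector, u32)>),
}

/// Promote a full-range selector to one simple state; keep anything
/// narrower as a per-selector list.
fn get_state(selector: Selector, state: u32, full_range: &Selector) -> SingleOrMany {
    if selector == *full_range {
        SingleOrMany::Single(state)
    } else {
        SingleOrMany::Many(vec![(selector, state)])
    }
}

fn main() {
    let full = Selector { mips: 0..4, layers: 0..2 };
    assert!(matches!(get_state(full.clone(), 1, &full), SingleOrMany::Single(1)));

    let partial = Selector { mips: 0..1, layers: 0..2 };
    assert!(matches!(get_state(partial, 1, &full), SingleOrMany::Many(_)));
}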
#[inline(always)] unsafe fn insert_or_barrier_update( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, start_state: Option<&mut TextureStateSet>, current_state_set: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, - index32: u32, + resource_metadata: &mut ResourceMetadata>, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, barriers: &mut Vec>, ) { let currently_owned = unsafe { resource_metadata.contains_unchecked(index) }; @@ -969,11 +934,10 @@ unsafe fn insert_or_barrier_update( if !currently_owned { unsafe { insert( - Some(texture_data), + Some(texture_selector), start_state, current_state_set, resource_metadata, - index32, index, start_state_provider, end_state_provider, @@ -986,9 +950,8 @@ unsafe fn insert_or_barrier_update( let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone()); unsafe { barrier( - texture_data, + texture_selector, current_state_set, - index32, index, start_state_provider, barriers, @@ -998,10 +961,9 @@ unsafe fn insert_or_barrier_update( let start_state_set = start_state.unwrap(); unsafe { update( - texture_data, + texture_selector, start_state_set, current_state_set, - index32, index, update_state_provider, ) @@ -1010,24 +972,23 @@ unsafe fn insert_or_barrier_update( #[inline(always)] unsafe fn insert( - texture_data: Option<(&LifeGuard, &TextureSelector)>, + texture_selector: Option<&TextureSelector>, start_state: Option<&mut TextureStateSet>, end_state: &mut TextureStateSet, - resource_metadata: &mut ResourceMetadata, - index32: u32, + resource_metadata: &mut ResourceMetadata>, index: usize, start_state_provider: TextureStateProvider<'_>, end_state_provider: Option>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) { - let start_layers = unsafe { start_state_provider.get_state(texture_data, index32, index) }; + let start_layers = unsafe { start_state_provider.get_state(texture_selector, index) }; match start_layers { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. strict_assert_eq!(invalid_resource_state(state), false); - log::trace!("\ttex {index32}: insert start {state:?}"); + log::trace!("\ttex {index}: insert start {state:?}"); if let Some(start_state) = start_state { unsafe { *start_state.simple.get_unchecked_mut(index) = state }; @@ -1039,100 +1000,95 @@ unsafe fn insert( } } SingleOrManyStates::Many(state_iter) => { - let full_range = texture_data.unwrap().1.clone(); + let full_range = texture_selector.unwrap().clone(); let complex = unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; - log::trace!("\ttex {index32}: insert start {complex:?}"); + log::trace!("\ttex {index}: insert start {complex:?}"); if let Some(start_state) = start_state { unsafe { *start_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - start_state.complex.insert(index32, complex.clone()); + start_state.complex.insert(index, complex.clone()); } // We only need to insert ourselves the end state if there is no end state provider. 
if end_state_provider.is_none() { unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - end_state.complex.insert(index32, complex); + end_state.complex.insert(index, complex); } } } if let Some(end_state_provider) = end_state_provider { - match unsafe { end_state_provider.get_state(texture_data, index32, index) } { + match unsafe { end_state_provider.get_state(texture_selector, index) } { SingleOrManyStates::Single(state) => { // This should only ever happen with a wgpu bug, but let's just double // check that resource states don't have any conflicts. strict_assert_eq!(invalid_resource_state(state), false); - log::trace!("\ttex {index32}: insert end {state:?}"); + log::trace!("\ttex {index}: insert end {state:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. unsafe { *end_state.simple.get_unchecked_mut(index) = state }; } SingleOrManyStates::Many(state_iter) => { - let full_range = texture_data.unwrap().1.clone(); + let full_range = texture_selector.unwrap().clone(); let complex = unsafe { ComplexTextureState::from_selector_state_iter(full_range, state_iter) }; - log::trace!("\ttex {index32}: insert end {complex:?}"); + log::trace!("\ttex {index}: insert end {complex:?}"); // We only need to insert into the end, as there is guarenteed to be // a start state provider. unsafe { *end_state.simple.get_unchecked_mut(index) = TextureUses::COMPLEX }; - end_state.complex.insert(index32, complex); + end_state.complex.insert(index, complex); } } } unsafe { - let (epoch, ref_count) = - metadata_provider.get_own(texture_data.map(|(life_guard, _)| life_guard), index); - resource_metadata.insert(index, epoch, ref_count); + let resource = metadata_provider.get_own(index); + resource_metadata.insert(index, resource); } } #[inline(always)] unsafe fn merge( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, current_state_set: &mut TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, - metadata_provider: ResourceMetadataProvider<'_, A>, + metadata_provider: ResourceMetadataProvider<'_, A, TextureId, Texture>, ) -> Result<(), UsageConflict> { let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set - .complex - .get_mut(&index32) - .unwrap_unchecked() + current_state_set.complex.get_mut(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { let merged_state = *current_simple | new_simple; - log::trace!("\ttex {index32}: merge simple {current_simple:?} + {new_simple:?}"); + log::trace!("\ttex {index}: merge simple {current_simple:?} + {new_simple:?}"); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), - texture_data.1.clone(), + texture_selector.clone(), *current_simple, new_simple, )); @@ -1146,22 +1102,20 @@ unsafe fn merge( // as there wasn't one before. 
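The merge arms above always OR the incoming usage into the current one and reject the result if it is an invalid combination, i.e. a write usage mixed with anything else in the same scope. A sketch of that check with made-up usage bits; wgpu-hal's real `TextureUses`/`BufferUses` flags are richer and its `invalid_resource_state` is more precise:

// Made-up usage bits for illustration only.
const COPY_SRC: u32 = 1 << 0; // read
const COPY_DST: u32 = 1 << 1; // write
const STORAGE_WRITE: u32 = 1 << 2; // write
const WRITE_BITS: u32 = COPY_DST | STORAGE_WRITE;

/// A merged state is treated as invalid when it mixes a write usage with
/// any other usage: writes must be exclusive within one usage scope.
fn invalid_resource_state(state: u32) -> bool {
    let writes = state & WRITE_BITS;
    writes != 0 && writes != state || writes.count_ones() > 1
}

/// Sketch of the merge arm: OR the states together, then reject conflicts.
fn merge(current: &mut u32, new: u32) -> Result<(), String> {
    let merged = *current | new;
    if invalid_resource_state(merged) {
        return Err(format!("usage conflict: {current:#b} + {new:#b}"));
    }
    *current = merged;
    Ok(())
}

fn main() {
    let mut state = COPY_SRC;
    assert!(merge(&mut state, COPY_SRC).is_ok()); // read + read is fine
    assert!(merge(&mut state, COPY_DST).is_err()); // read + write conflicts
}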
let mut new_complex = unsafe { ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), + texture_selector.clone(), + iter::once((texture_selector.clone(), *current_simple)), ) }; for (selector, new_state) in new_many { let merged_state = *current_simple | new_state; - log::trace!( - "\ttex {index32}: merge {selector:?} {current_simple:?} + {new_state:?}" - ); + log::trace!("\ttex {index}: merge {selector:?} {current_simple:?} + {new_state:?}"); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1185,7 +1139,7 @@ unsafe fn merge( } *current_simple = TextureUses::COMPLEX; - current_state_set.complex.insert(index32, new_complex); + current_state_set.complex.insert(index, new_complex); } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_simple)) => { for (mip_id, mip) in current_complex.mips.iter_mut().enumerate() { @@ -1199,14 +1153,14 @@ unsafe fn merge( let merged_state = merged_state - TextureUses::UNKNOWN; log::trace!( - "\ttex {index32}: merge mip {mip_id} layers {layers:?} \ + "\ttex {index}: merge mip {mip_id} layers {layers:?} \ {current_layer_state:?} + {new_simple:?}" ); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1244,14 +1198,14 @@ unsafe fn merge( } log::trace!( - "\ttex {index32}: merge mip {mip_id} layers {layers:?} \ + "\ttex {index}: merge mip {mip_id} layers {layers:?} \ {current_layer_state:?} + {new_state:?}" ); if invalid_resource_state(merged_state) { return Err(UsageConflict::from_texture( TextureId::zip( - index32, + index as _, unsafe { metadata_provider.get_epoch(index) }, A::VARIANT, ), @@ -1276,9 +1230,8 @@ unsafe fn merge( #[inline(always)] unsafe fn barrier( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, current_state_set: &TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, barriers: &mut Vec>, @@ -1286,13 +1239,13 @@ unsafe fn barrier( let current_simple = unsafe { *current_state_set.simple.get_unchecked(index) }; let current_state = if current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set.complex.get(&index32).unwrap_unchecked() + current_state_set.complex.get(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1300,11 +1253,11 @@ unsafe fn barrier( return; } - log::trace!("\ttex {index32}: transition simple {current_simple:?} -> {new_simple:?}"); + log::trace!("\ttex {index}: transition simple {current_simple:?} -> {new_simple:?}"); barriers.push(PendingTransition { - id: index32, - selector: texture_data.1.clone(), + id: index as _, + selector: texture_selector.clone(), usage: current_simple..new_simple, }); } @@ -1319,11 +1272,11 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition {selector:?} {current_simple:?} -> {new_state:?}" + "\ttex {index}: transition {selector:?} {current_simple:?} -> {new_state:?}" ); 
barriers.push(PendingTransition { - id: index32, + id: index as _, selector, usage: current_simple..new_state, }); @@ -1343,12 +1296,12 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition mip {mip_id} layers {layers:?} \ + "\ttex {index}: transition mip {mip_id} layers {layers:?} \ {current_layer_state:?} -> {new_simple:?}" ); barriers.push(PendingTransition { - id: index32, + id: index as _, selector: TextureSelector { mips: mip_id..mip_id + 1, layers: layers.clone(), @@ -1377,12 +1330,12 @@ unsafe fn barrier( } log::trace!( - "\ttex {index32}: transition mip {mip_id} layers {layers:?} \ + "\ttex {index}: transition mip {mip_id} layers {layers:?} \ {current_layer_state:?} -> {new_state:?}" ); barriers.push(PendingTransition { - id: index32, + id: index as _, selector: TextureSelector { mips: mip_id..mip_id + 1, layers, @@ -1399,10 +1352,9 @@ unsafe fn barrier( #[allow(clippy::needless_option_as_deref)] // we use this for reborrowing Option<&mut T> #[inline(always)] unsafe fn update( - texture_data: (&LifeGuard, &TextureSelector), + texture_selector: &TextureSelector, start_state_set: &mut TextureStateSet, current_state_set: &mut TextureStateSet, - index32: u32, index: usize, state_provider: TextureStateProvider<'_>, ) { @@ -1413,23 +1365,19 @@ unsafe fn update( // If the state is simple, the first insert to the tracker would cover it. let mut start_complex = None; if start_simple == TextureUses::COMPLEX { - start_complex = - Some(unsafe { start_state_set.complex.get_mut(&index32).unwrap_unchecked() }); + start_complex = Some(unsafe { start_state_set.complex.get_mut(&index).unwrap_unchecked() }); } let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) }; let current_state = if *current_simple == TextureUses::COMPLEX { SingleOrManyStates::Many(unsafe { - current_state_set - .complex - .get_mut(&index32) - .unwrap_unchecked() + current_state_set.complex.get_mut(&index).unwrap_unchecked() }) } else { SingleOrManyStates::Single(current_simple) }; - let new_state = unsafe { state_provider.get_state(Some(texture_data), index32, index) }; + let new_state = unsafe { state_provider.get_state(Some(texture_selector), index) }; match (current_state, new_state) { (SingleOrManyStates::Single(current_simple), SingleOrManyStates::Single(new_simple)) => { @@ -1441,8 +1389,8 @@ unsafe fn update( // as there wasn't one before. 
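// Illustrative sketch, not part of the patch: `TextureId::zip(index as _, epoch,
// backend)` above rebuilds a full id from the tracker's `usize` index; wgpu-core ids
// pack a slot index, a generation epoch, and a backend tag into one integer. The
// layout below is a simplified assumption, not the real bit widths.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct RawId(u64);

impl RawId {
    fn zip(index: u32, epoch: u32) -> Self {
        // low 32 bits: slot index; high 32 bits: epoch guarding against stale reuse
        RawId(((epoch as u64) << 32) | index as u64)
    }
    fn unzip(self) -> (u32, u32) {
        ((self.0 & 0xffff_ffff) as u32, (self.0 >> 32) as u32)
    }
}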
let mut new_complex = unsafe { ComplexTextureState::from_selector_state_iter( - texture_data.1.clone(), - iter::once((texture_data.1.clone(), *current_simple)), + texture_selector.clone(), + iter::once((texture_selector.clone(), *current_simple)), ) }; @@ -1464,7 +1412,7 @@ unsafe fn update( } *current_simple = TextureUses::COMPLEX; - current_state_set.complex.insert(index32, new_complex); + current_state_set.complex.insert(index, new_complex); } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Single(new_single)) => { for (mip_id, mip) in current_complex.mips.iter().enumerate() { @@ -1490,12 +1438,7 @@ unsafe fn update( } unsafe { *current_state_set.simple.get_unchecked_mut(index) = new_single }; - unsafe { - current_state_set - .complex - .remove(&index32) - .unwrap_unchecked() - }; + unsafe { current_state_set.complex.remove(&index).unwrap_unchecked() }; } (SingleOrManyStates::Many(current_complex), SingleOrManyStates::Many(new_many)) => { for (selector, new_state) in new_many { diff --git a/third_party/rust/wgpu-hal/.cargo-checksum.json b/third_party/rust/wgpu-hal/.cargo-checksum.json index e65e84c8843e2..2e1af1a789db7 100644 --- a/third_party/rust/wgpu-hal/.cargo-checksum.json +++ b/third_party/rust/wgpu-hal/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"af8f5b97c66922b8f68e5214102cc79befc3a3a86984a611a3bdc01fae168523","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"216efbf93177cc633b5a7e598a0570df760a41bab01c29b42e1cee4a863a9a5e","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"d2b853152d8d84591f8ebef8b3739e506e370d8fef4582e00b31128784c0395c","src/auxil/dxgi/conv.rs":"536a6ff638ce32bdbf7cd697270f563c14f2c8cc013bb0e9565e06dc5127730a","src/auxil/dxgi/exception.rs":"baad31f4529f1cf9d10519c4cceccbb21ed9bba7d7ea99e4789c1d5ddf3a450b","src/auxil/dxgi/factory.rs":"b66a77645ede6b32d709cef23ba258323f59d4f996dc72edbdae27d930795fbe","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"c38f0d3b10804d1c1d7e3b8e4a975fcb87271f8b1904f2f4a3153cceddafb56b","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx11/adapter.rs":"cacb8243f9107f63bc3247a76b3d28adc3b87d79ffee0af90214a483ab51285b","src/dx11/command.rs":"e4959ec3dc6f8494844e01b56ba162ba91270c9c9a52e83315c8730fcfe188b9","src/dx11/device.rs":"96ccd8d6645839f3daf832ddf569676643ac92d1b332ab9a0c8563b3b5026295","src/dx11/instance.rs":"206b38346de245e315e762b8de81ca26c74d10aad26a488713945d0e1c802e2b","src/dx11/library.rs":"4fb09475fb24bc45fb7a464a7c6a30b45e10fed0022aad2891a390b512c11ae1","src/dx11/mod.rs":"4023922673cbfde7fbb4a8c7620607ff317b8f0075567e5b9c6a11a081eb8aab","src/dx12/adapter.rs":"28868258dae69a70b111a21c4b6ae3b6c4dbb7e0470737ccff0a15b75b466e9a","src/dx12/command.rs":"8a99d028ce379f14c19d88069c91a58d6fd7b1957e0a114c369f51a0bfaee1e9","src/dx12/conv.rs":"24d6ac9808f7534f3480ba2eb563f98f1441c8ad2b4c6e86b2700f5ac812e99a","src/dx12/descriptor.rs":"d9e
295c48dc7d1759fd121c927c2218c1bde90e9f1b35f2ad01b610b184b614e","src/dx12/device.rs":"f015f59682768b24474cd1736d9461e65e0a2c932976ff4c502e2f1bdaae86cd","src/dx12/instance.rs":"493a1e93474d72a4d3bea8701d9195a478fe36ce2ad4a881f65b15db8f8f7f6d","src/dx12/mod.rs":"d0c547838442d34e6a04e695b299be0b81c96675a81e5539df0e0dc941310284","src/dx12/shader_compilation.rs":"0589ed592cbd939f821d2801c82ee973c28844292682d37db84048ba21e6c72b","src/dx12/suballocation.rs":"eec45b2d23e979f7d7f33d982a89ae2f92e528b22f1bb7d2e5dd52582a25a601","src/dx12/types.rs":"9573736baaa0ef607367c3b72144556d24faf677a26bb8df49a4372a1348e06b","src/dx12/view.rs":"c09241520377e6a47ad8822c69075e6e16e6c558e18bb64398c8d7424fc57dcf","src/empty.rs":"4a47b337ed93a076613c379b001f12d276da6423af6cd0df1e344c52886cef86","src/gles/adapter.rs":"676d3c7d80b1fff7225863a8ddee8f041fef9aaa8fb3bef95455ceea1c2714f6","src/gles/command.rs":"9c4f2e474d0fd512f4d8da2a42ca309f9ec3c7bbf472d043b59a180cb3b0f459","src/gles/conv.rs":"23f7f82b74fb01dc09cf9b696a6977d993e65c4f700b3029984ff2dbdc9b3dda","src/gles/device.rs":"bf5b3a6eeff4ec56d25783d92c0ccd50a06f5142ead3ae209e50a5f81fedd26e","src/gles/egl.rs":"99df0b03679e0725481093a520870fbdfb45e00359a1820e8aa40a255be4d92d","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e213eab5f3a2be79239e147","src/gles/mod.rs":"c7de1063f8d0775c84e5ea578020e01c8b385b84f37e1a9bada299105ca5e82c","src/gles/queue.rs":"999652c9b1305a9792933989b29792c685a0130d6cc3da491ad8bd37b57b0f85","src/gles/shaders/clear.frag":"9133ed8ed97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"8c1758a3eac121acc56368d9b7a50b6c9e800010d676be38b30fd1aa77cea2e2","src/gles/wgl.rs":"b7272b616f39b7eb743501fadc974a60893dacbb63df92da876abe446981e01c","src/lib.rs":"66736c592e1fef7c25e2b5be19e5e04cbf391ab1609eea3a8c4415bf5dc4ad38","src/metal/adapter.rs":"9d200608a3f931ee2d37f085809c761eb809bae2058f8caa25c592c7d2c19752","src/metal/command.rs":"95512642ff903a57b661b10b72b44c065a760a3096bcb2aabbd9977e6dba0d5b","src/metal/conv.rs":"0bce6a8d0ccef16783475803d70d35e03ab7938c19374e22c9d253abe1f8b111","src/metal/device.rs":"d2fb16e8d7cfc0c9e3141dcf1245a1f0ea8a891962c0cd83c0cd4450e7acaf36","src/metal/mod.rs":"ab7a2570144fae14203a8126d7cdcb9df576c046d0fd2b83514246574ab124b1","src/metal/surface.rs":"f4b3f8364ec32a7540f7a1da3b4b7e8c6c1cf336aa5535a4515b4efb5b11c443","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"939a24ec9b3b03c874c00903755f8b575b35397fde6d28846d18bcb5e5cd9002","src/vulkan/command.rs":"e10346068cdf97f50b98b5d7481b09f601319843cf843206c95c597fad36144e","src/vulkan/conv.rs":"63abaed541f7f5263f5e3a27e3e7fad419cb32c7b8af25081aa547add12087a8","src/vulkan/device.rs":"fa515d41a30bdbd91c6d7c7f3c6b72538c918f16ff077c45e5702012fd060801","src/vulkan/instance.rs":"d71d9707b01584f423916ecc1784a625f7f2a9c57751cb555a7f40878182d28a","src/vulkan/mod.rs":"a0cbaa351d6a41f1ef8db2f7b1c0c4b0051aaa37798ae8ce737eb8009fd879d3"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"af8f5b97c66922b8f68e5214102cc79befc3a3a86984a611a3bdc01fae168523","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"360828f6f103ce966a152ceaff50087b59d0eac580fac12c8f89850024ccc233","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"33f536693b08a9f00b0358416be8f368894bf92a2e673b16622fa9c244854c91","src/auxil/dxgi/conv.rs":"536a6ff638ce32bdbf7cd697270f563c14f2c8cc013bb0e9565e06dc5127730a","src/auxil/dxgi/exception.rs":"baad31f4529f1cf9d10519c4cceccbb21ed9bba7d7ea99e4789c1d5ddf3a450b","src/auxil/dxgi/factory.rs":"65c758d6583c2fdac0b3d48d1a0a2a414ff913efc4f3913a38bd660819c3a2e2","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"c38f0d3b10804d1c1d7e3b8e4a975fcb87271f8b1904f2f4a3153cceddafb56b","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx11/adapter.rs":"cacb8243f9107f63bc3247a76b3d28adc3b87d79ffee0af90214a483ab51285b","src/dx11/command.rs":"e4959ec3dc6f8494844e01b56ba162ba91270c9c9a52e83315c8730fcfe188b9","src/dx11/device.rs":"4ee1880b0d48ed054654e936c76b3e7b7e370fbf79c2f0af370f8de350fd94ff","src/dx11/instance.rs":"206b38346de245e315e762b8de81ca26c74d10aad26a488713945d0e1c802e2b","src/dx11/library.rs":"1b4c3575fd8fd30d07a61ce68ec3c930305c4df5aa5dcf54dd1fdd4edff590d4","src/dx11/mod.rs":"1f2e7f04801b4e65aa27183bc0ddeee0dd3aa686bac4056649dbd97ddef142e1","src/dx12/adapter.rs":"03763cc928b38a4153ab91d7a6ab7d38e2f64c7590f2e17a29c56e0a93b1284f","src/dx12/command.rs":"8a99d028ce379f14c19d88069c91a58d6fd7b1957e0a114c369f51a0bfaee1e9","src/dx12/conv.rs":"24d6ac9808f7534f3480ba2eb563f98f1441c8ad2b4c6e86b2700f5ac812e99a","src/dx12/descriptor.rs":"d9e295c48dc7d1759fd121c927c2218c1bde90e9f1b35f2ad01b610b184b614e","src/dx12/device.rs":"0ac995a92acd8cbe160f58f7347ed50ce5c0d16a3da496127aff66eecca765e7","src/dx12/instance.rs":"719125a6adb69f16df1a0069c8a1ccb5792169a00abdf2e38cc26b633b15768a","src/dx12/mod.rs":"9c5c3babcdfc0daa81ba85a2729020e102e5bcc7c2ec35d3d228ccf477775edf","src/dx12/shader_compilation.rs":"0589ed592cbd939f821d2801c82ee973c28844292682d37db84048ba21e6c72b","src/dx12/suballocation.rs":"eec45b2d23e979f7d7f33d982a89ae2f92e528b22f1bb7d2e5dd52582a25a601","src/dx12/types.rs":"9573736baaa0ef607367c3b72144556d24faf677a26bb8df49a4372a1348e06b","src/dx12/view.rs":"c09241520377e6a47ad8822c69075e6e16e6c558e18bb64398c8d7424fc57dcf","src/empty.rs":"8288689e0016c60e58d1c3e24fe3a9ef247bb23961c9f01482a0bf21870379a0","src/gles/adapter.rs":"6e38936a7a3f01f9aa13b6568e495499b320d009f33146cf978cf57bcd795013","src/gles/command.rs":"9c4f2e474d0fd512f4d8da2a42ca309f9ec3c7bbf472d043b59a180cb3b0f459","src/gles/conv.rs":"23f7f82b74fb01dc09cf9b696a6977d993e65c4f700b3029984ff2dbdc9b3dda","src/gles/device.rs":"8e13133b398f25850e342777edd06076188644a1013004b2a08e00845d62e6a6","src/gles/egl.rs":"89212bdacbb9dc43eebcc7b74fb612a69d35ffc1dfe2b626820c44c61256c4c9","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e
213eab5f3a2be79239e147","src/gles/mod.rs":"43b3e9d03f9eca064832dd307e4b8b8f1d2d3bbcdad21c87af3854f9bd479e3d","src/gles/queue.rs":"b6c675d5f76309650779ccf0d35ca32e322020d7199de09586d040b58ad719b0","src/gles/shaders/clear.frag":"9133ed8ed97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"5623c8c78584c5e9182502e0997bd11e627e67ccd23d516e65526f415215de2f","src/gles/wgl.rs":"ec1c958acc903af43c29e60059c2c71257b5735312c15679647daed774ad01a1","src/lib.rs":"04b61b4d07fc001fd3c2d95d7577bc485e7ac67406e7052681222d32233cc0ae","src/metal/adapter.rs":"9d200608a3f931ee2d37f085809c761eb809bae2058f8caa25c592c7d2c19752","src/metal/command.rs":"95512642ff903a57b661b10b72b44c065a760a3096bcb2aabbd9977e6dba0d5b","src/metal/conv.rs":"0bce6a8d0ccef16783475803d70d35e03ab7938c19374e22c9d253abe1f8b111","src/metal/device.rs":"d2fb16e8d7cfc0c9e3141dcf1245a1f0ea8a891962c0cd83c0cd4450e7acaf36","src/metal/mod.rs":"3c49207a7cc88b75785aa4886147121192d19d01cdc621319aff36b3659fa98c","src/metal/surface.rs":"01539fa9f22c26fdcca5ee915ca97cf76cecc7fae237347dfc9a813ae13e98cd","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"d9af751f16d30b3d129a7481d1f529ef53cc7c6e0ca0b6f25181f8449f23b6fd","src/vulkan/command.rs":"e10346068cdf97f50b98b5d7481b09f601319843cf843206c95c597fad36144e","src/vulkan/conv.rs":"63abaed541f7f5263f5e3a27e3e7fad419cb32c7b8af25081aa547add12087a8","src/vulkan/device.rs":"1557ee3b93a085b1704b858a42a7b3b8c742c20b87ab325e9f0096ec5b97bc59","src/vulkan/instance.rs":"e16d2e849b2ead8d83d354946bbffd03c26e2b024b07fe8b59c3ea47093a0a12","src/vulkan/mod.rs":"7ed5c34156d70808e7845575d93a8c2a07bfa989d455c3848e93e8ae372b99ff"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-hal/examples/halmark/main.rs b/third_party/rust/wgpu-hal/examples/halmark/main.rs index 471b0c48906b1..609146454a715 100644 --- a/third_party/rust/wgpu-hal/examples/halmark/main.rs +++ b/third_party/rust/wgpu-hal/examples/halmark/main.rs @@ -100,7 +100,7 @@ impl Example { gles_minor_version: wgt::Gles3MinorVersion::default(), }; let instance = unsafe { A::Instance::init(&instance_desc)? 
}; - let mut surface = { + let surface = { let raw_window_handle = window.window_handle()?.as_raw(); let raw_display_handle = window.display_handle()?.as_raw(); @@ -119,11 +119,12 @@ impl Example { let exposed = adapters.swap_remove(0); (exposed.adapter, exposed.capabilities) }; + let surface_caps = unsafe { adapter.surface_capabilities(&surface) } .ok_or("failed to get surface capabilities")?; log::info!("Surface caps: {:#?}", surface_caps); - let hal::OpenDevice { device, mut queue } = unsafe { + let hal::OpenDevice { device, queue } = unsafe { adapter .open(wgt::Features::empty(), &wgt::Limits::default()) .unwrap() @@ -727,13 +728,13 @@ impl Example { None }; self.queue.submit(&[&cmd_buf], fence_param).unwrap(); - self.queue.present(&mut self.surface, surface_tex).unwrap(); + self.queue.present(&self.surface, surface_tex).unwrap(); ctx.used_cmd_bufs.push(cmd_buf); ctx.used_views.push(surface_tex_view); }; if do_fence { - log::info!("Context switch from {}", self.context_index); + log::debug!("Context switch from {}", self.context_index); let old_fence_value = ctx.fence_value; if self.contexts.len() == 1 { let hal_desc = hal::CommandEncoderDescriptor { diff --git a/third_party/rust/wgpu-hal/examples/raw-gles.rs b/third_party/rust/wgpu-hal/examples/raw-gles.rs index 455c555e85e47..81ab4171e3dd2 100644 --- a/third_party/rust/wgpu-hal/examples/raw-gles.rs +++ b/third_party/rust/wgpu-hal/examples/raw-gles.rs @@ -123,7 +123,7 @@ fn main() {} fn fill_screen(exposed: &hal::ExposedAdapter, width: u32, height: u32) { use hal::{Adapter as _, CommandEncoder as _, Device as _, Queue as _}; - let mut od = unsafe { + let od = unsafe { exposed .adapter .open(wgt::Features::empty(), &wgt::Limits::downlevel_defaults()) diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs index 86d632678c860..2d09f44a75b27 100644 --- a/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs +++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs @@ -18,6 +18,26 @@ fn should_keep_adapter(adapter: &dxgi::IDXGIAdapter1) -> bool { let mut desc = unsafe { std::mem::zeroed() }; unsafe { adapter.GetDesc1(&mut desc) }; + // The Intel Haswell family of iGPUs had support for the D3D12 API but it was later + // removed due to a security vulnerability. + // + // We are explicitly filtering out all the devices in the family because we are now + // getting reports of device loss at a later time than at device creation time (`D3D12CreateDevice`). + // + // See https://www.intel.com/content/www/us/en/support/articles/000057520/graphics.html + // This list of device IDs is from https://dgpu-docs.intel.com/devices/hardware-table.html + let haswell_device_ids = [ + 0x0422, 0x0426, 0x042A, 0x042B, 0x042E, 0x0C22, 0x0C26, 0x0C2A, 0x0C2B, 0x0C2E, 0x0A22, + 0x0A2A, 0x0A2B, 0x0D2A, 0x0D2B, 0x0D2E, 0x0A26, 0x0A2E, 0x0D22, 0x0D26, 0x0412, 0x0416, + 0x0D12, 0x041A, 0x041B, 0x0C12, 0x0C16, 0x0C1A, 0x0C1B, 0x0C1E, 0x0A12, 0x0A1A, 0x0A1B, + 0x0D16, 0x0D1A, 0x0D1B, 0x0D1E, 0x041E, 0x0A16, 0x0A1E, 0x0402, 0x0406, 0x040A, 0x040B, + 0x040E, 0x0C02, 0x0C06, 0x0C0A, 0x0C0B, 0x0C0E, 0x0A02, 0x0A06, 0x0A0A, 0x0A0B, 0x0A0E, + 0x0D02, 0x0D06, 0x0D0A, 0x0D0B, 0x0D0E, + ]; + if desc.VendorId == 0x8086 && haswell_device_ids.contains(&desc.DeviceId) { + return false; + } + // If run completely headless, windows will show two different WARP adapters, one // which is lying about being an integrated card. 
This is so that programs // that ignore software adapters will actually run on headless/gpu-less machines. @@ -92,7 +112,7 @@ pub fn enumerate_adapters(factory: d3d12::DxgiFactory) -> Vec { - log::info!("Failed casting Adapter1 to Adapter3: {}", err); + log::warn!("Failed casting Adapter1 to Adapter3: {}", err); } } } @@ -105,7 +125,7 @@ pub fn enumerate_adapters(factory: d3d12::DxgiFactory) -> Vec { - log::info!("Failed casting Adapter1 to Adapter2: {}", err); + log::warn!("Failed casting Adapter1 to Adapter2: {}", err); } } } @@ -170,9 +190,9 @@ pub fn create_factory( err, )); } - // If we don't print it to info as all win7 will hit this case. + // If we don't print it to warn as all win7 will hit this case. Err(err) => { - log::info!("IDXGIFactory1 creation function not found: {err:?}"); + log::warn!("IDXGIFactory1 creation function not found: {err:?}"); None } }; @@ -191,9 +211,9 @@ pub fn create_factory( "failed to cast IDXGIFactory4 to IDXGIFactory6: {err:?}" ))); } - // If we don't print it to info. + // If we don't print it to warn. Err(err) => { - log::info!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); + log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); return Ok((lib_dxgi, d3d12::DxgiFactory::Factory4(factory4))); } } @@ -232,9 +252,9 @@ pub fn create_factory( "failed to cast IDXGIFactory1 to IDXGIFactory2: {err:?}" ))); } - // If we don't print it to info. + // If we don't print it to warn. Err(err) => { - log::info!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); + log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); } } diff --git a/third_party/rust/wgpu-hal/src/dx11/device.rs b/third_party/rust/wgpu-hal/src/dx11/device.rs index 3b087c4311519..ce33584e35a0e 100644 --- a/third_party/rust/wgpu-hal/src/dx11/device.rs +++ b/third_party/rust/wgpu-hal/src/dx11/device.rs @@ -204,7 +204,7 @@ impl crate::Device for super::Device { impl crate::Queue for super::Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&super::CommandBuffer], signal_fence: Option<(&mut super::Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -212,8 +212,8 @@ impl crate::Queue for super::Queue { } unsafe fn present( - &mut self, - surface: &mut super::Surface, + &self, + surface: &super::Surface, texture: super::SurfaceTexture, ) -> Result<(), crate::SurfaceError> { todo!() diff --git a/third_party/rust/wgpu-hal/src/dx11/library.rs b/third_party/rust/wgpu-hal/src/dx11/library.rs index c2b5315ba1e1d..2b7b1b1c2ae50 100644 --- a/third_party/rust/wgpu-hal/src/dx11/library.rs +++ b/third_party/rust/wgpu-hal/src/dx11/library.rs @@ -120,7 +120,7 @@ impl D3D11Lib { return Some((super::D3D11Device::Device2(device2), feature_level)); } Err(hr) => { - log::info!("Failed to cast device to ID3D11Device2: {}", hr) + log::warn!("Failed to cast device to ID3D11Device2: {}", hr) } } } @@ -132,7 +132,7 @@ impl D3D11Lib { return Some((super::D3D11Device::Device1(device1), feature_level)); } Err(hr) => { - log::info!("Failed to cast device to ID3D11Device1: {}", hr) + log::warn!("Failed to cast device to ID3D11Device1: {}", hr) } } } diff --git a/third_party/rust/wgpu-hal/src/dx11/mod.rs b/third_party/rust/wgpu-hal/src/dx11/mod.rs index a529e98c93391..203f37cac55d1 100644 --- a/third_party/rust/wgpu-hal/src/dx11/mod.rs +++ b/third_party/rust/wgpu-hal/src/dx11/mod.rs @@ -108,30 +108,32 @@ pub struct BindGroup {} pub struct PipelineLayout {} #[derive(Debug)] pub struct ShaderModule {} +#[derive(Debug)] pub struct RenderPipeline {} 
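// Illustrative sketch, not part of the patch: the signature changes in this hunk and
// in the dx12/gles hunks below all follow one recipe from the arcanization work —
// state that a `&mut self` method used to mutate moves behind a lock, so the method
// can take `&self`. A minimal version using `parking_lot` (as the patch does):

use parking_lot::RwLock;

struct SwapChain; // stand-in for the backend swapchain state

struct Surface {
    swap_chain: RwLock<Option<SwapChain>>, // was `Option<SwapChain>` behind `&mut self`
}

impl Surface {
    fn configure(&self) {
        *self.swap_chain.write() = Some(SwapChain); // exclusive access via the guard
    }
    fn unconfigure(&self) {
        if let Some(_old) = self.swap_chain.write().take() {
            // backend resources would be released here
        }
    }
}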
+#[derive(Debug)] pub struct ComputePipeline {} impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { todo!() } - unsafe fn unconfigure(&mut self, device: &Device) { + unsafe fn unconfigure(&self, device: &Device) { todo!() } unsafe fn acquire_texture( - &mut self, + &self, _timeout: Option, ) -> Result>, crate::SurfaceError> { todo!() } - unsafe fn discard_texture(&mut self, texture: SurfaceTexture) { + unsafe fn discard_texture(&self, texture: SurfaceTexture) { todo!() } } diff --git a/third_party/rust/wgpu-hal/src/dx12/adapter.rs b/third_party/rust/wgpu-hal/src/dx12/adapter.rs index 28524858f921d..9d66fd2653c62 100644 --- a/third_party/rust/wgpu-hal/src/dx12/adapter.rs +++ b/third_party/rust/wgpu-hal/src/dx12/adapter.rs @@ -2,6 +2,7 @@ use crate::{ auxil::{self, dxgi::result::HResult as _}, dx12::{shader_compilation, SurfaceTarget}, }; +use parking_lot::Mutex; use std::{mem, ptr, sync::Arc, thread}; use winapi::{ shared::{ @@ -427,7 +428,7 @@ impl crate::Adapter for super::Adapter { device, queue: super::Queue { raw: queue, - temp_lists: Vec::new(), + temp_lists: Mutex::new(Vec::new()), }, }) } diff --git a/third_party/rust/wgpu-hal/src/dx12/device.rs b/third_party/rust/wgpu-hal/src/dx12/device.rs index 84146a006b23e..06e1b59a21bc8 100644 --- a/third_party/rust/wgpu-hal/src/dx12/device.rs +++ b/third_party/rust/wgpu-hal/src/dx12/device.rs @@ -186,7 +186,7 @@ impl super::Device { } let value = cur_value + 1; - log::info!("Waiting for idle with value {}", value); + log::debug!("Waiting for idle with value {}", value); self.present_queue.signal(&self.idler.fence, value); let hr = self .idler diff --git a/third_party/rust/wgpu-hal/src/dx12/instance.rs b/third_party/rust/wgpu-hal/src/dx12/instance.rs index 36a3ec2c96906..7bf5f3ef75508 100644 --- a/third_party/rust/wgpu-hal/src/dx12/instance.rs +++ b/third_party/rust/wgpu-hal/src/dx12/instance.rs @@ -1,3 +1,4 @@ +use parking_lot::RwLock; use winapi::shared::{dxgi1_5, minwindef}; use super::SurfaceTarget; @@ -50,7 +51,7 @@ impl crate::Instance for super::Instance { } }, Err(err) => { - log::info!("IDXGIFactory1 creation function not found: {:?}", err); + log::warn!("IDXGIFactory1 creation function not found: {:?}", err); None } }; @@ -117,7 +118,7 @@ impl crate::Instance for super::Instance { factory_media: self.factory_media.clone(), target: SurfaceTarget::WndHandle(handle.hwnd.get() as *mut _), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), }), _ => Err(crate::InstanceError::new(format!( "window handle {window_handle:?} is not a Win32 handle" diff --git a/third_party/rust/wgpu-hal/src/dx12/mod.rs b/third_party/rust/wgpu-hal/src/dx12/mod.rs index 39c24bd5283da..0de992cf910ce 100644 --- a/third_party/rust/wgpu-hal/src/dx12/mod.rs +++ b/third_party/rust/wgpu-hal/src/dx12/mod.rs @@ -47,7 +47,7 @@ mod view; use crate::auxil::{self, dxgi::result::HResult as _}; use arrayvec::ArrayVec; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; use std::{ffi, fmt, mem, num::NonZeroU32, sync::Arc}; use winapi::{ shared::{dxgi, dxgi1_4, dxgitype, windef, winerror}, @@ -108,7 +108,7 @@ impl Instance { factory_media: self.factory_media.clone(), target: SurfaceTarget::Visual(unsafe { d3d12::ComPtr::from_raw(visual) }), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } @@ -121,7 +121,7 @@ impl Instance { factory_media: 
self.factory_media.clone(), target: SurfaceTarget::SurfaceHandle(surface_handle), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } @@ -136,7 +136,7 @@ impl Instance { d3d12::ComPtr::from_raw(swap_chain_panel) }), supports_allow_tearing: self.supports_allow_tearing, - swap_chain: None, + swap_chain: RwLock::new(None), } } } @@ -168,7 +168,7 @@ pub struct Surface { factory_media: Option, target: SurfaceTarget, supports_allow_tearing: bool, - swap_chain: Option, + swap_chain: RwLock>, } unsafe impl Send for Surface {} @@ -261,7 +261,7 @@ unsafe impl Sync for Device {} pub struct Queue { raw: d3d12::CommandQueue, - temp_lists: Vec, + temp_lists: Mutex>, } unsafe impl Send for Queue {} @@ -499,7 +499,7 @@ pub struct BindGroupLayout { copy_counts: Vec, // all 1's } -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] enum BufferViewKind { Constant, ShaderResource, @@ -524,19 +524,20 @@ bitflags::bitflags! { // Element (also known as parameter) index into the root signature. type RootIndex = u32; +#[derive(Debug)] struct BindGroupInfo { base_root_index: RootIndex, tables: TableTypes, dynamic_buffers: Vec, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct RootConstantInfo { root_index: RootIndex, range: std::ops::Range, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct PipelineLayoutShared { signature: d3d12::RootSignature, total_root_elements: RootIndex, @@ -547,6 +548,7 @@ struct PipelineLayoutShared { unsafe impl Send for PipelineLayoutShared {} unsafe impl Sync for PipelineLayoutShared {} +#[derive(Debug)] pub struct PipelineLayout { shared: PipelineLayoutShared, // Storing for each associated bind group, which tables we created @@ -578,6 +580,7 @@ impl CompiledShader { unsafe fn destroy(self) {} } +#[derive(Debug)] pub struct RenderPipeline { raw: d3d12::PipelineState, layout: PipelineLayoutShared, @@ -588,6 +591,7 @@ pub struct RenderPipeline { unsafe impl Send for RenderPipeline {} unsafe impl Sync for RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline { raw: d3d12::PipelineState, layout: PipelineLayoutShared, @@ -623,7 +627,7 @@ impl SwapChain { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -644,7 +648,7 @@ impl crate::Surface for Surface { let non_srgb_format = auxil::dxgi::conv::map_texture_format_nosrgb(config.format); - let swap_chain = match self.swap_chain.take() { + let swap_chain = match self.swap_chain.write().take() { //Note: this path doesn't properly re-initialize all of the things Some(sc) => { let raw = unsafe { sc.release_resources() }; @@ -793,7 +797,8 @@ impl crate::Surface for Surface { resources.push(resource); } - self.swap_chain = Some(SwapChain { + let mut swapchain = self.swap_chain.write(); + *swapchain = Some(SwapChain { raw: swap_chain, resources, waitable, @@ -806,8 +811,8 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: &Device) { - if let Some(sc) = self.swap_chain.take() { + unsafe fn unconfigure(&self, device: &Device) { + if let Some(sc) = self.swap_chain.write().take() { unsafe { // While `unconfigure`s contract ensures that no work on the GPU's main queues // are in flight, we still need to wait for the present queue to be idle. 
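// Illustrative sketch, not part of the patch: `Queue::submit` just below applies the
// same recipe to scratch storage — the reusable `temp_lists` vector moves into a
// `Mutex`, so a shared `&self` queue can still batch command lists; plain counters
// elsewhere in this patch become atomics (e.g. `AtomicU8`) for the same reason.
// Types are simplified stand-ins.

use parking_lot::Mutex;

#[derive(Clone)]
struct CommandList; // stand-in for a backend command-list handle

struct Queue {
    temp_lists: Mutex<Vec<CommandList>>,
}

impl Queue {
    fn submit(&self, command_buffers: &[CommandList]) {
        let mut temp_lists = self.temp_lists.lock(); // held for the whole submission
        temp_lists.clear();
        temp_lists.extend_from_slice(command_buffers);
        // the backend's execute_command_lists(&temp_lists) call would go here
    }
}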
@@ -823,10 +828,11 @@ impl crate::Surface for Surface { } unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, crate::SurfaceError> { - let sc = self.swap_chain.as_mut().unwrap(); + let mut swapchain = self.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); unsafe { sc.wait(timeout) }?; @@ -848,26 +854,28 @@ impl crate::Surface for Surface { suboptimal: false, })) } - unsafe fn discard_texture(&mut self, _texture: Texture) { - let sc = self.swap_chain.as_mut().unwrap(); + unsafe fn discard_texture(&self, _texture: Texture) { + let mut swapchain = self.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); sc.acquired_count -= 1; } } impl crate::Queue for Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&CommandBuffer], signal_fence: Option<(&mut Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { - self.temp_lists.clear(); + let mut temp_lists = self.temp_lists.lock(); + temp_lists.clear(); for cmd_buf in command_buffers { - self.temp_lists.push(cmd_buf.raw.as_list()); + temp_lists.push(cmd_buf.raw.as_list()); } { profiling::scope!("ID3D12CommandQueue::ExecuteCommandLists"); - self.raw.execute_command_lists(&self.temp_lists); + self.raw.execute_command_lists(&temp_lists); } if let Some((fence, value)) = signal_fence { @@ -885,11 +893,12 @@ impl crate::Queue for Queue { Ok(()) } unsafe fn present( - &mut self, - surface: &mut Surface, + &self, + surface: &Surface, _texture: Texture, ) -> Result<(), crate::SurfaceError> { - let sc = surface.swap_chain.as_mut().unwrap(); + let mut swapchain = surface.swap_chain.write(); + let sc = swapchain.as_mut().unwrap(); sc.acquired_count -= 1; let (interval, flags) = match sc.present_mode { diff --git a/third_party/rust/wgpu-hal/src/empty.rs b/third_party/rust/wgpu-hal/src/empty.rs index 64bcf3109be26..a2a8c57c994e7 100644 --- a/third_party/rust/wgpu-hal/src/empty.rs +++ b/third_party/rust/wgpu-hal/src/empty.rs @@ -57,22 +57,22 @@ impl crate::Instance for Context { impl crate::Surface for Context { unsafe fn configure( - &mut self, + &self, device: &Context, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { Ok(()) } - unsafe fn unconfigure(&mut self, device: &Context) {} + unsafe fn unconfigure(&self, device: &Context) {} unsafe fn acquire_texture( - &mut self, + &self, timeout: Option, ) -> Result>, crate::SurfaceError> { Ok(None) } - unsafe fn discard_texture(&mut self, texture: Resource) {} + unsafe fn discard_texture(&self, texture: Resource) {} } impl crate::Adapter for Context { @@ -101,15 +101,15 @@ impl crate::Adapter for Context { impl crate::Queue for Context { unsafe fn submit( - &mut self, + &self, command_buffers: &[&Resource], signal_fence: Option<(&mut Resource, crate::FenceValue)>, ) -> DeviceResult<()> { Ok(()) } unsafe fn present( - &mut self, - surface: &mut Context, + &self, + surface: &Context, texture: Resource, ) -> Result<(), crate::SurfaceError> { Ok(()) diff --git a/third_party/rust/wgpu-hal/src/gles/adapter.rs b/third_party/rust/wgpu-hal/src/gles/adapter.rs index b771caed5953e..34e61ce915786 100644 --- a/third_party/rust/wgpu-hal/src/gles/adapter.rs +++ b/third_party/rust/wgpu-hal/src/gles/adapter.rs @@ -1,5 +1,6 @@ use glow::HasContext; -use std::sync::Arc; +use parking_lot::Mutex; +use std::sync::{atomic::AtomicU8, Arc}; use wgt::AstcChannel; use crate::auxil::db; @@ -213,9 +214,9 @@ impl super::Adapter { let vendor = unsafe { gl.get_parameter_string(vendor_const) }; let renderer = unsafe { 
gl.get_parameter_string(renderer_const) }; let version = unsafe { gl.get_parameter_string(glow::VERSION) }; - log::trace!("Vendor: {}", vendor); - log::trace!("Renderer: {}", renderer); - log::trace!("Version: {}", version); + log::debug!("Vendor: {}", vendor); + log::debug!("Renderer: {}", renderer); + log::debug!("Version: {}", version); let full_ver = Self::parse_full_version(&version).ok(); let es_ver = full_ver @@ -271,7 +272,7 @@ impl super::Adapter { let shading_language_version = { let sl_version = unsafe { gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION) }; - log::trace!("SL version: {}", &sl_version); + log::debug!("SL version: {}", &sl_version); if full_ver.is_some() { let (sl_major, sl_minor) = Self::parse_full_version(&sl_version).ok()?; let mut value = sl_major as u16 * 100 + sl_minor as u16 * 10; @@ -290,7 +291,7 @@ impl super::Adapter { } }; - log::trace!("Supported GL Extensions: {:#?}", extensions); + log::debug!("Supported GL Extensions: {:#?}", extensions); let supported = |(req_es_major, req_es_minor), (req_full_major, req_full_minor)| { let es_supported = es_ver @@ -919,9 +920,9 @@ impl crate::Adapter for super::Adapter { shader_clear_program, shader_clear_program_color_uniform_location, zero_buffer, - temp_query_results: Vec::new(), - draw_buffer_count: 1, - current_index_buffer: None, + temp_query_results: Mutex::new(Vec::new()), + draw_buffer_count: AtomicU8::new(1), + current_index_buffer: Mutex::new(None), }, }) } diff --git a/third_party/rust/wgpu-hal/src/gles/device.rs b/third_party/rust/wgpu-hal/src/gles/device.rs index 7934c4be01ad3..64f47788eb1fb 100644 --- a/third_party/rust/wgpu-hal/src/gles/device.rs +++ b/third_party/rust/wgpu-hal/src/gles/device.rs @@ -187,7 +187,7 @@ impl super::Device { unsafe { gl.shader_source(raw, shader) }; unsafe { gl.compile_shader(raw) }; - log::info!("\tCompiled shader {:?}", raw); + log::debug!("\tCompiled shader {:?}", raw); let compiled_ok = unsafe { gl.get_shader_compile_status(raw) }; let msg = unsafe { gl.get_shader_info_log(raw) }; @@ -398,7 +398,7 @@ impl super::Device { unsafe { gl.delete_shader(shader) }; } - log::info!("\tLinked program {:?}", program); + log::debug!("\tLinked program {:?}", program); let linked_ok = unsafe { gl.get_program_link_status(program) }; let msg = unsafe { gl.get_program_info_log(program) }; @@ -1418,27 +1418,27 @@ impl crate::Device for super::Device { } else { (timeout_ms as u64 * 1_000_000).min(!0u32 as u64) }; - let &(_, sync) = fence + if let Some(&(_, sync)) = fence .pending .iter() .find(|&&(value, _)| value >= wait_value) - .unwrap(); - match unsafe { - gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) - } { - // for some reason firefox returns WAIT_FAILED, to investigate - #[cfg(target_arch = "wasm32")] - glow::WAIT_FAILED => { - log::warn!("wait failed!"); - Ok(false) - } - glow::TIMEOUT_EXPIRED => Ok(false), - glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true), - _ => Err(crate::DeviceError::Lost), + { + return match unsafe { + gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32) + } { + // for some reason firefox returns WAIT_FAILED, to investigate + #[cfg(target_arch = "wasm32")] + glow::WAIT_FAILED => { + log::warn!("wait failed!"); + Ok(false) + } + glow::TIMEOUT_EXPIRED => Ok(false), + glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true), + _ => Err(crate::DeviceError::Lost), + }; } - } else { - Ok(true) } + Ok(true) } unsafe fn start_capture(&self) -> bool { diff --git 
a/third_party/rust/wgpu-hal/src/gles/egl.rs b/third_party/rust/wgpu-hal/src/gles/egl.rs index 82a53781857bd..bda33f35c06ad 100644 --- a/third_party/rust/wgpu-hal/src/gles/egl.rs +++ b/third_party/rust/wgpu-hal/src/gles/egl.rs @@ -1,5 +1,5 @@ use glow::HasContext; -use parking_lot::{Mutex, MutexGuard}; +use parking_lot::{Mutex, MutexGuard, RwLock}; use std::{ffi, os::raw, ptr, rc::Rc, sync::Arc, time::Duration}; @@ -159,7 +159,7 @@ impl Drop for DisplayOwner { } fn open_x_display() -> Option { - log::info!("Loading X11 library to get the current display"); + log::debug!("Loading X11 library to get the current display"); unsafe { let library = libloading::Library::new("libX11.so").ok()?; let func: libloading::Symbol = library.get(b"XOpenDisplay").unwrap(); @@ -185,7 +185,7 @@ fn test_wayland_display() -> Option { /* We try to connect and disconnect here to simply ensure there * is an active wayland display available. */ - log::info!("Loading Wayland library to get the current display"); + log::debug!("Loading Wayland library to get the current display"); let library = unsafe { let client_library = find_library(&["libwayland-client.so.0", "libwayland-client.so"])?; let wl_display_connect: libloading::Symbol = @@ -243,7 +243,7 @@ fn choose_config( let mut attributes = Vec::with_capacity(9); for tier_max in (0..tiers.len()).rev() { let name = tiers[tier_max].0; - log::info!("\tTrying {}", name); + log::debug!("\tTrying {}", name); attributes.clear(); for &(_, tier_attr) in tiers[..=tier_max].iter() { @@ -462,17 +462,17 @@ impl Inner { .query_string(Some(display), khronos_egl::EXTENSIONS) .unwrap() .to_string_lossy(); - log::info!("Display vendor {:?}, version {:?}", vendor, version,); + log::debug!("Display vendor {:?}, version {:?}", vendor, version,); log::debug!( "Display extensions: {:#?}", display_extensions.split_whitespace().collect::>() ); let srgb_kind = if version >= (1, 5) { - log::info!("\tEGL surface: +srgb"); + log::debug!("\tEGL surface: +srgb"); SrgbFrameBufferKind::Core } else if display_extensions.contains("EGL_KHR_gl_colorspace") { - log::info!("\tEGL surface: +srgb khr"); + log::debug!("\tEGL surface: +srgb khr"); SrgbFrameBufferKind::Khr } else { log::warn!("\tEGL surface: -srgb"); @@ -520,14 +520,14 @@ impl Inner { if flags.contains(wgt::InstanceFlags::DEBUG) { if version >= (1, 5) { - log::info!("\tEGL context: +debug"); + log::debug!("\tEGL context: +debug"); context_attributes.push(khronos_egl::CONTEXT_OPENGL_DEBUG); context_attributes.push(khronos_egl::TRUE as _); } else if supports_khr_context { - log::info!("\tEGL context: +debug KHR"); + log::debug!("\tEGL context: +debug KHR"); khr_context_flags |= EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR; } else { - log::info!("\tEGL context: -debug"); + log::debug!("\tEGL context: -debug"); } } if needs_robustness { @@ -535,11 +535,11 @@ impl Inner { // (regardless of whether the extension is supported!). // In fact, Angle does precisely that awful behavior, so we don't try it there. 
if version >= (1, 5) && !display_extensions.contains("EGL_ANGLE_") { - log::info!("\tEGL context: +robust access"); + log::debug!("\tEGL context: +robust access"); context_attributes.push(khronos_egl::CONTEXT_OPENGL_ROBUST_ACCESS); context_attributes.push(khronos_egl::TRUE as _); } else if display_extensions.contains("EGL_EXT_create_context_robustness") { - log::info!("\tEGL context: +robust access EXT"); + log::debug!("\tEGL context: +robust access EXT"); context_attributes.push(EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT); context_attributes.push(khronos_egl::TRUE as _); } else { @@ -571,7 +571,7 @@ impl Inner { || display_extensions.contains("EGL_KHR_surfaceless_context") || cfg!(target_os = "emscripten") { - log::info!("\tEGL context: +surfaceless"); + log::debug!("\tEGL context: +surfaceless"); None } else { let attributes = [ @@ -782,7 +782,7 @@ impl crate::Instance for Instance { .unwrap(); (display, Some(Rc::new(display_owner)), WindowKind::AngleX11) } else if client_ext_str.contains("EGL_MESA_platform_surfaceless") { - log::info!("No windowing system present. Using surfaceless platform"); + log::warn!("No windowing system present. Using surfaceless platform"); let egl = egl1_5.expect("Failed to get EGL 1.5 for surfaceless"); let display = unsafe { egl.get_platform_display( @@ -795,7 +795,7 @@ impl crate::Instance for Instance { (display, None, WindowKind::Unknown) } else { - log::info!("EGL_MESA_platform_surfaceless not available. Using default platform"); + log::warn!("EGL_MESA_platform_surfaceless not available. Using default platform"); let display = unsafe { egl.get_display(khronos_egl::DEFAULT_DISPLAY) }.unwrap(); (display, None, WindowKind::Unknown) }; @@ -803,7 +803,7 @@ impl crate::Instance for Instance { if desc.flags.contains(wgt::InstanceFlags::VALIDATION) && client_ext_str.contains("EGL_KHR_debug") { - log::info!("Enabling EGL debug output"); + log::debug!("Enabling EGL debug output"); let function: EglDebugMessageControlFun = { let addr = egl.get_proc_address("eglDebugMessageControlKHR").unwrap(); unsafe { std::mem::transmute(addr) } @@ -937,7 +937,7 @@ impl crate::Instance for Instance { config: inner.config, presentable: inner.supports_native_window, raw_window_handle: window_handle, - swapchain: None, + swapchain: RwLock::new(None), srgb_kind: inner.srgb_kind, }) } @@ -958,13 +958,13 @@ impl crate::Instance for Instance { }; if self.flags.contains(wgt::InstanceFlags::DEBUG) && gl.supports_debug() { - log::info!("Max label length: {}", unsafe { + log::debug!("Max label length: {}", unsafe { gl.get_parameter_i32(glow::MAX_LABEL_LENGTH) }); } if self.flags.contains(wgt::InstanceFlags::VALIDATION) && gl.supports_debug() { - log::info!("Enabling GLES debug output"); + log::debug!("Enabling GLES debug output"); unsafe { gl.enable(glow::DEBUG_OUTPUT) }; unsafe { gl.debug_message_callback(super::gl_debug_message_callback) }; } @@ -1035,7 +1035,7 @@ pub struct Surface { config: khronos_egl::Config, pub(super) presentable: bool, raw_window_handle: raw_window_handle::RawWindowHandle, - swapchain: Option, + swapchain: RwLock>, srgb_kind: SrgbFrameBufferKind, } @@ -1044,12 +1044,13 @@ unsafe impl Sync for Surface {} impl Surface { pub(super) unsafe fn present( - &mut self, + &self, _suf_texture: super::Texture, context: &AdapterContext, ) -> Result<(), crate::SurfaceError> { let gl = unsafe { context.get_without_egl_lock() }; - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); self.egl .instance @@ -1107,11 
+1108,11 @@ impl Surface { } unsafe fn unconfigure_impl( - &mut self, + &self, device: &super::Device, ) -> Option<(khronos_egl::Surface, Option<*mut raw::c_void>)> { let gl = &device.shared.context.lock(); - match self.swapchain.take() { + match self.swapchain.write().take() { Some(sc) => { unsafe { gl.delete_renderbuffer(sc.renderbuffer) }; unsafe { gl.delete_framebuffer(sc.framebuffer) }; @@ -1131,7 +1132,7 @@ impl Surface { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -1316,7 +1317,8 @@ impl crate::Surface for Surface { unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) }; unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) }; - self.swapchain = Some(Swapchain { + let mut swapchain = self.swapchain.write(); + *swapchain = Some(Swapchain { surface, wl_window, renderbuffer, @@ -1330,7 +1332,7 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: &super::Device) { + unsafe fn unconfigure(&self, device: &super::Device) { if let Some((surface, wl_window)) = unsafe { self.unconfigure_impl(device) } { self.egl .instance @@ -1351,10 +1353,11 @@ impl crate::Surface for Surface { } unsafe fn acquire_texture( - &mut self, + &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); let texture = super::Texture { inner: super::TextureInner::Renderbuffer { raw: sc.renderbuffer, @@ -1375,5 +1378,5 @@ impl crate::Surface for Surface { suboptimal: false, })) } - unsafe fn discard_texture(&mut self, _texture: super::Texture) {} + unsafe fn discard_texture(&self, _texture: super::Texture) {} } diff --git a/third_party/rust/wgpu-hal/src/gles/mod.rs b/third_party/rust/wgpu-hal/src/gles/mod.rs index 0af5ad4a6edbf..7b9a694b7dc9f 100644 --- a/third_party/rust/wgpu-hal/src/gles/mod.rs +++ b/third_party/rust/wgpu-hal/src/gles/mod.rs @@ -95,7 +95,7 @@ use glow::HasContext; use naga::FastHashMap; use parking_lot::Mutex; -use std::sync::atomic::AtomicU32; +use std::sync::atomic::{AtomicU32, AtomicU8}; use std::{fmt, ops::Range, sync::Arc}; #[derive(Clone, Debug)] @@ -248,9 +248,9 @@ pub struct Queue { /// Keep a reasonably large buffer filled with zeroes, so that we can implement `ClearBuffer` of /// zeroes by copying from it. zero_buffer: glow::Buffer, - temp_query_results: Vec, - draw_buffer_count: u8, - current_index_buffer: Option, + temp_query_results: Mutex>, + draw_buffer_count: AtomicU8, + current_index_buffer: Mutex>, } #[derive(Clone, Debug)] @@ -387,6 +387,7 @@ pub struct BindGroupLayout { entries: Arc<[wgt::BindGroupLayoutEntry]>, } +#[derive(Debug)] struct BindGroupLayoutInfo { entries: Arc<[wgt::BindGroupLayoutEntry]>, /// Mapping of resources, indexed by `binding`, into the whole layout space. @@ -397,6 +398,7 @@ struct BindGroupLayoutInfo { binding_to_slot: Box<[u8]>, } +#[derive(Debug)] pub struct PipelineLayout { group_infos: Box<[BindGroupLayoutInfo]>, naga_options: naga::back::glsl::Options, @@ -510,6 +512,7 @@ unsafe impl Send for PushConstantDesc {} /// sampler (in this layout) that the texture is used with. 
type SamplerBindMap = [Option; MAX_TEXTURE_SLOTS]; +#[derive(Debug)] struct PipelineInner { program: glow::Program, sampler_map: SamplerBindMap, @@ -556,6 +559,7 @@ struct ProgramCacheKey { type ProgramCache = FastHashMap, crate::PipelineError>>; +#[derive(Debug)] pub struct RenderPipeline { inner: Arc, primitive: wgt::PrimitiveState, @@ -581,6 +585,7 @@ unsafe impl Sync for RenderPipeline {} ))] unsafe impl Send for RenderPipeline {} +#[derive(Debug)] pub struct ComputePipeline { inner: Arc, } @@ -691,7 +696,7 @@ impl Default for StencilSide { } } -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default)] struct StencilState { front: StencilSide, back: StencilSide, diff --git a/third_party/rust/wgpu-hal/src/gles/queue.rs b/third_party/rust/wgpu-hal/src/gles/queue.rs index 3b87ae7b729bd..22c9d4dc0a985 100644 --- a/third_party/rust/wgpu-hal/src/gles/queue.rs +++ b/third_party/rust/wgpu-hal/src/gles/queue.rs @@ -1,7 +1,10 @@ use super::{conv::is_layered_target, Command as C, PrivateCapabilities}; use arrayvec::ArrayVec; use glow::HasContext; -use std::{mem, slice, sync::Arc}; +use std::{ + mem, slice, + sync::{atomic::Ordering, Arc}, +}; const DEBUG_ID: u32 = 0; @@ -55,16 +58,17 @@ impl super::Queue { unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) }; unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) }; - if self.draw_buffer_count != 0 { + let draw_buffer_count = self.draw_buffer_count.load(Ordering::Relaxed); + if draw_buffer_count != 0 { // Reset the draw buffers to what they were before the clear - let indices = (0..self.draw_buffer_count as u32) + let indices = (0..draw_buffer_count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); unsafe { gl.draw_buffers(&indices) }; } } - unsafe fn reset_state(&mut self, gl: &glow::Context) { + unsafe fn reset_state(&self, gl: &glow::Context) { unsafe { gl.use_program(None) }; unsafe { gl.bind_framebuffer(glow::FRAMEBUFFER, None) }; unsafe { gl.disable(glow::DEPTH_TEST) }; @@ -79,7 +83,8 @@ impl super::Queue { } unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None) }; - self.current_index_buffer = None; + let mut current_index_buffer = self.current_index_buffer.lock(); + *current_index_buffer = None; } unsafe fn set_attachment( @@ -146,7 +151,7 @@ impl super::Queue { } unsafe fn process( - &mut self, + &self, gl: &glow::Context, command: &C, #[cfg_attr(target_arch = "wasm32", allow(unused))] data_bytes: &[u8], @@ -355,7 +360,10 @@ impl super::Queue { unsafe { gl.bind_buffer(copy_src_target, None) }; if is_index_buffer_only_element_dst { unsafe { - gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer) + gl.bind_buffer( + glow::ELEMENT_ARRAY_BUFFER, + *self.current_index_buffer.lock(), + ) }; } else { unsafe { gl.bind_buffer(copy_dst_target, None) }; @@ -799,7 +807,8 @@ impl super::Queue { } C::SetIndexBuffer(buffer) => { unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)) }; - self.current_index_buffer = Some(buffer); + let mut current_index_buffer = self.current_index_buffer.lock(); + *current_index_buffer = Some(buffer); } C::BeginQuery(query, target) => { unsafe { gl.begin_query(target, query) }; @@ -861,7 +870,8 @@ impl super::Queue { } } } else { - self.temp_query_results.clear(); + let mut temp_query_results = self.temp_query_results.lock(); + temp_query_results.clear(); for &query in queries[query_range.start as usize..query_range.end as usize].iter() { @@ -874,12 +884,12 @@ impl super::Queue { result as usize, ) }; - self.temp_query_results.push(result); + 
temp_query_results.push(result); } let query_data = unsafe { slice::from_raw_parts( - self.temp_query_results.as_ptr() as *const u8, - self.temp_query_results.len() * mem::size_of::(), + temp_query_results.as_ptr() as *const u8, + temp_query_results.len() * mem::size_of::(), ) }; match dst.raw { @@ -979,7 +989,7 @@ impl super::Queue { } } C::SetDrawColorBuffers(count) => { - self.draw_buffer_count = count; + self.draw_buffer_count.store(count, Ordering::Relaxed); let indices = (0..count as u32) .map(|i| glow::COLOR_ATTACHMENT0 + i) .collect::>(); @@ -1660,7 +1670,7 @@ impl super::Queue { impl crate::Queue for super::Queue { unsafe fn submit( - &mut self, + &self, command_buffers: &[&super::CommandBuffer], signal_fence: Option<(&mut super::Fence, crate::FenceValue)>, ) -> Result<(), crate::DeviceError> { @@ -1707,8 +1717,8 @@ impl crate::Queue for super::Queue { } unsafe fn present( - &mut self, - surface: &mut super::Surface, + &self, + surface: &super::Surface, texture: super::Texture, ) -> Result<(), crate::SurfaceError> { unsafe { surface.present(texture, &self.shared.context) } diff --git a/third_party/rust/wgpu-hal/src/gles/web.rs b/third_party/rust/wgpu-hal/src/gles/web.rs index f4ce038bb18a2..49236bb94cbe2 100644 --- a/third_party/rust/wgpu-hal/src/gles/web.rs +++ b/third_party/rust/wgpu-hal/src/gles/web.rs @@ -1,5 +1,5 @@ use glow::HasContext; -use parking_lot::Mutex; +use parking_lot::{Mutex, RwLock}; use wasm_bindgen::JsCast; use super::TextureFormatDesc; @@ -92,9 +92,9 @@ impl Instance { Ok(Surface { canvas, webgl2_context, - srgb_present_program: None, - swapchain: None, - texture: None, + srgb_present_program: Mutex::new(None), + swapchain: RwLock::new(None), + texture: Mutex::new(None), presentable: true, }) } @@ -176,14 +176,27 @@ impl crate::Instance for Instance { } } -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct Surface { canvas: Canvas, webgl2_context: web_sys::WebGl2RenderingContext, - pub(super) swapchain: Option, - texture: Option, + pub(super) swapchain: RwLock>, + texture: Mutex>, pub(super) presentable: bool, - srgb_present_program: Option, + srgb_present_program: Mutex>, +} + +impl Clone for Surface { + fn clone(&self) -> Self { + Self { + canvas: self.canvas.clone(), + webgl2_context: self.webgl2_context.clone(), + swapchain: RwLock::new(self.swapchain.read().clone()), + texture: Mutex::new(*self.texture.lock()), + presentable: self.presentable, + srgb_present_program: Mutex::new(*self.srgb_present_program.lock()), + } + } } #[cfg(all( @@ -214,12 +227,13 @@ pub struct Swapchain { impl Surface { pub(super) unsafe fn present( - &mut self, + &self, _suf_texture: super::Texture, context: &AdapterContext, ) -> Result<(), crate::SurfaceError> { let gl = &context.glow_context; - let swapchain = self.swapchain.as_ref().ok_or(crate::SurfaceError::Other( + let swapchain = self.swapchain.read(); + let swapchain = swapchain.as_ref().ok_or(crate::SurfaceError::Other( "need to configure surface before presenting", ))?; @@ -236,8 +250,8 @@ impl Surface { unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) }; unsafe { gl.bind_sampler(0, None) }; unsafe { gl.active_texture(glow::TEXTURE0) }; - unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; - unsafe { gl.use_program(self.srgb_present_program) }; + unsafe { gl.bind_texture(glow::TEXTURE_2D, *self.texture.lock()) }; + unsafe { gl.use_program(*self.srgb_present_program.lock()) }; unsafe { gl.disable(glow::DEPTH_TEST) }; unsafe { gl.disable(glow::STENCIL_TEST) }; unsafe { gl.disable(glow::SCISSOR_TEST) }; 
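// Illustrative sketch, not part of the patch: lock guards are not `Clone`, so the web
// `Surface` above stops deriving `Clone` and instead clones field-by-field,
// snapshotting each lock's contents into a fresh lock. Field types simplified:

use parking_lot::{Mutex, RwLock};

struct Surface {
    swapchain: RwLock<Option<String>>,      // stand-in for the real swapchain state
    srgb_present_program: Mutex<Option<u32>>, // stand-in for the GL program handle
}

impl Clone for Surface {
    fn clone(&self) -> Self {
        Self {
            swapchain: RwLock::new(self.swapchain.read().clone()),
            srgb_present_program: Mutex::new(*self.srgb_present_program.lock()),
        }
    }
}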
@@ -298,7 +312,7 @@ impl Surface { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -315,94 +329,107 @@ impl crate::Surface for Surface { let gl = &device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.take() { - // delete all frame buffers already allocated - unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + { + let mut swapchain = self.swapchain.write(); + if let Some(swapchain) = swapchain.take() { + // delete all frame buffers already allocated + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + } } - - if self.srgb_present_program.is_none() && config.format.is_srgb() { - self.srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); + { + let mut srgb_present_program = self.srgb_present_program.lock(); + if srgb_present_program.is_none() && config.format.is_srgb() { + *srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) }); + } } + { + let mut texture = self.texture.lock(); + if let Some(texture) = texture.take() { + unsafe { gl.delete_texture(texture) }; + } - if let Some(texture) = self.texture.take() { - unsafe { gl.delete_texture(texture) }; - } + *texture = Some(unsafe { gl.create_texture() }.map_err(|error| { + log::error!("Internal swapchain texture creation failed: {error}"); + crate::DeviceError::OutOfMemory + })?); - self.texture = Some(unsafe { gl.create_texture() }.map_err(|error| { - log::error!("Internal swapchain texture creation failed: {error}"); - crate::DeviceError::OutOfMemory - })?); - - let desc = device.shared.describe_texture_format(config.format); - unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) }; - unsafe { - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MIN_FILTER, - glow::NEAREST as _, - ) - }; - unsafe { - gl.tex_parameter_i32( - glow::TEXTURE_2D, - glow::TEXTURE_MAG_FILTER, - glow::NEAREST as _, - ) - }; - unsafe { - gl.tex_storage_2d( - glow::TEXTURE_2D, - 1, - desc.internal, - config.extent.width as i32, - config.extent.height as i32, - ) - }; + let desc = device.shared.describe_texture_format(config.format); + unsafe { gl.bind_texture(glow::TEXTURE_2D, *texture) }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MIN_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_parameter_i32( + glow::TEXTURE_2D, + glow::TEXTURE_MAG_FILTER, + glow::NEAREST as _, + ) + }; + unsafe { + gl.tex_storage_2d( + glow::TEXTURE_2D, + 1, + desc.internal, + config.extent.width as i32, + config.extent.height as i32, + ) + }; - let framebuffer = unsafe { gl.create_framebuffer() }.map_err(|error| { - log::error!("Internal swapchain framebuffer creation failed: {error}"); - crate::DeviceError::OutOfMemory - })?; - unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; - unsafe { - gl.framebuffer_texture_2d( - glow::READ_FRAMEBUFFER, - glow::COLOR_ATTACHMENT0, - glow::TEXTURE_2D, - self.texture, - 0, - ) - }; - unsafe { gl.bind_texture(glow::TEXTURE_2D, None) }; + let framebuffer = unsafe { gl.create_framebuffer() }.map_err(|error| { + log::error!("Internal swapchain framebuffer creation failed: {error}"); + crate::DeviceError::OutOfMemory + })?; + unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) }; + unsafe { + gl.framebuffer_texture_2d( + glow::READ_FRAMEBUFFER, + glow::COLOR_ATTACHMENT0, + glow::TEXTURE_2D, + *texture, + 0, + ) + }; + unsafe { 
gl.bind_texture(glow::TEXTURE_2D, None) }; + + let mut swapchain = self.swapchain.write(); + *swapchain = Some(Swapchain { + extent: config.extent, + // channel: config.format.base_format().1, + format: config.format, + format_desc: desc, + framebuffer, + }); + } - self.swapchain = Some(Swapchain { - extent: config.extent, - // channel: config.format.base_format().1, - format: config.format, - format_desc: desc, - framebuffer, - }); Ok(()) } - unsafe fn unconfigure(&mut self, device: &super::Device) { + unsafe fn unconfigure(&self, device: &super::Device) { let gl = device.shared.context.lock(); - if let Some(swapchain) = self.swapchain.take() { - unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + { + let mut swapchain = self.swapchain.write(); + if let Some(swapchain) = swapchain.take() { + unsafe { gl.delete_framebuffer(swapchain.framebuffer) }; + } } - if let Some(renderbuffer) = self.texture.take() { + if let Some(renderbuffer) = self.texture.lock().take() { unsafe { gl.delete_texture(renderbuffer) }; } } unsafe fn acquire_texture( - &mut self, + &self, _timeout_ms: Option, //TODO ) -> Result>, crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); let texture = super::Texture { inner: super::TextureInner::Texture { - raw: self.texture.unwrap(), + raw: self.texture.lock().unwrap(), target: glow::TEXTURE_2D, }, drop_guard: None, @@ -422,5 +449,5 @@ impl crate::Surface for Surface { })) } - unsafe fn discard_texture(&mut self, _texture: super::Texture) {} + unsafe fn discard_texture(&self, _texture: super::Texture) {} } diff --git a/third_party/rust/wgpu-hal/src/gles/wgl.rs b/third_party/rust/wgpu-hal/src/gles/wgl.rs index 4554d7a70580b..6e6860728625f 100644 --- a/third_party/rust/wgpu-hal/src/gles/wgl.rs +++ b/third_party/rust/wgpu-hal/src/gles/wgl.rs @@ -4,7 +4,7 @@ use glutin_wgl_sys::wgl_extra::{ CONTEXT_PROFILE_MASK_ARB, }; use once_cell::sync::Lazy; -use parking_lot::{Mutex, MutexGuard}; +use parking_lot::{Mutex, MutexGuard, RwLock}; use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; use std::{ collections::HashSet, @@ -527,7 +527,7 @@ impl crate::Instance for Instance { Ok(Surface { window: window.hwnd.get() as *mut _, presentable: true, - swapchain: None, + swapchain: RwLock::new(None), srgb_capable: self.srgb_capable, }) } @@ -573,7 +573,7 @@ pub struct Swapchain { pub struct Surface { window: HWND, pub(super) presentable: bool, - swapchain: Option, + swapchain: RwLock>, srgb_capable: bool, } @@ -582,11 +582,12 @@ unsafe impl Sync for Surface {} impl Surface { pub(super) unsafe fn present( - &mut self, + &self, _suf_texture: super::Texture, context: &AdapterContext, ) -> Result<(), crate::SurfaceError> { - let sc = self.swapchain.as_ref().unwrap(); + let swapchain = self.swapchain.read(); + let sc = swapchain.as_ref().unwrap(); let dc = unsafe { GetDC(self.window) }; if dc.is_null() { log::error!( @@ -662,7 +663,7 @@ impl Surface { impl crate::Surface for Surface { unsafe fn configure( - &mut self, + &self, device: &super::Device, config: &crate::SurfaceConfiguration, ) -> Result<(), crate::SurfaceError> { @@ -756,7 +757,7 @@ impl crate::Surface for Surface { return Err(crate::SurfaceError::Other("unable to set swap interval")); } - self.swapchain = Some(Swapchain { + self.swapchain.write().replace(Swapchain { renderbuffer, framebuffer, extent: config.extent, @@ -768,9 +769,9 @@ impl crate::Surface for Surface { Ok(()) } - unsafe fn unconfigure(&mut self, device: 
diff --git a/third_party/rust/wgpu-hal/src/gles/wgl.rs b/third_party/rust/wgpu-hal/src/gles/wgl.rs
index 4554d7a70580b..6e6860728625f 100644
--- a/third_party/rust/wgpu-hal/src/gles/wgl.rs
+++ b/third_party/rust/wgpu-hal/src/gles/wgl.rs
@@ -4,7 +4,7 @@ use glutin_wgl_sys::wgl_extra::{
     CONTEXT_PROFILE_MASK_ARB,
 };
 use once_cell::sync::Lazy;
-use parking_lot::{Mutex, MutexGuard};
+use parking_lot::{Mutex, MutexGuard, RwLock};
 use raw_window_handle::{RawDisplayHandle, RawWindowHandle};
 use std::{
     collections::HashSet,
@@ -527,7 +527,7 @@ impl crate::Instance<super::Api> for Instance {
         Ok(Surface {
             window: window.hwnd.get() as *mut _,
             presentable: true,
-            swapchain: None,
+            swapchain: RwLock::new(None),
             srgb_capable: self.srgb_capable,
         })
     }
@@ -573,7 +573,7 @@ pub struct Swapchain {
 pub struct Surface {
     window: HWND,
     pub(super) presentable: bool,
-    swapchain: Option<Swapchain>,
+    swapchain: RwLock<Option<Swapchain>>,
     srgb_capable: bool,
 }
 
@@ -582,11 +582,12 @@ unsafe impl Sync for Surface {}
 
 impl Surface {
     pub(super) unsafe fn present(
-        &mut self,
+        &self,
         _suf_texture: super::Texture,
         context: &AdapterContext,
     ) -> Result<(), crate::SurfaceError> {
-        let sc = self.swapchain.as_ref().unwrap();
+        let swapchain = self.swapchain.read();
+        let sc = swapchain.as_ref().unwrap();
         let dc = unsafe { GetDC(self.window) };
         if dc.is_null() {
             log::error!(
@@ -662,7 +663,7 @@ impl Surface {
 
 impl crate::Surface<super::Api> for Surface {
     unsafe fn configure(
-        &mut self,
+        &self,
         device: &super::Device,
         config: &crate::SurfaceConfiguration,
     ) -> Result<(), crate::SurfaceError> {
@@ -756,7 +757,7 @@ impl crate::Surface<super::Api> for Surface {
             return Err(crate::SurfaceError::Other("unable to set swap interval"));
         }
 
-        self.swapchain = Some(Swapchain {
+        self.swapchain.write().replace(Swapchain {
             renderbuffer,
             framebuffer,
             extent: config.extent,
@@ -768,9 +769,9 @@ impl crate::Surface<super::Api> for Surface {
         Ok(())
     }
 
-    unsafe fn unconfigure(&mut self, device: &super::Device) {
+    unsafe fn unconfigure(&self, device: &super::Device) {
         let gl = &device.shared.context.lock();
-        if let Some(sc) = self.swapchain.take() {
+        if let Some(sc) = self.swapchain.write().take() {
             unsafe {
                 gl.delete_renderbuffer(sc.renderbuffer);
                 gl.delete_framebuffer(sc.framebuffer)
@@ -779,10 +780,11 @@ impl crate::Surface<super::Api> for Surface {
     }
 
     unsafe fn acquire_texture(
-        &mut self,
+        &self,
         _timeout_ms: Option<std::time::Duration>,
     ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
-        let sc = self.swapchain.as_ref().unwrap();
+        let swapchain = self.swapchain.read();
+        let sc = swapchain.as_ref().unwrap();
         let texture = super::Texture {
             inner: super::TextureInner::Renderbuffer {
                 raw: sc.renderbuffer,
@@ -803,5 +805,5 @@ impl crate::Surface<super::Api> for Surface {
             suboptimal: false,
         }))
     }
-    unsafe fn discard_texture(&mut self, _texture: super::Texture) {}
+    unsafe fn discard_texture(&self, _texture: super::Texture) {}
 }
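Note: the payoff of taking `&self` throughout (the arcanization theme of this revision) is that a surface can be shared as `Arc<Surface>` and mutated from any thread through its locks. A sketch, not from the patch, with a placeholder swapchain type and assuming `parking_lot`:

    use parking_lot::RwLock;
    use std::{sync::Arc, thread};

    struct Surface {
        swapchain: RwLock<Option<u64>>, // placeholder swapchain handle
    }

    fn main() {
        let surface = Arc::new(Surface {
            swapchain: RwLock::new(None),
        });

        let s = Arc::clone(&surface);
        // Configure from another thread through a shared reference.
        thread::spawn(move || {
            *s.swapchain.write() = Some(42);
        })
        .join()
        .unwrap();

        assert_eq!(*surface.swapchain.read(), Some(42));
    }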
diff --git a/third_party/rust/wgpu-hal/src/lib.rs b/third_party/rust/wgpu-hal/src/lib.rs
index 6c8e36ab7cec1..b0b1120dbb476 100644
--- a/third_party/rust/wgpu-hal/src/lib.rs
+++ b/third_party/rust/wgpu-hal/src/lib.rs
@@ -95,7 +95,7 @@ use std::{
 use bitflags::bitflags;
 use thiserror::Error;
-use wgt::{WasmNotSend, WasmNotSync};
+use wgt::WasmNotSendSync;
 
 // - Vertex + Fragment
 // - Compute
@@ -200,25 +200,25 @@ pub trait Api: Clone + fmt::Debug + Sized {
     type Queue: Queue<Self>;
     type CommandEncoder: CommandEncoder<Self>;
-    type CommandBuffer: WasmNotSend + WasmNotSync + fmt::Debug;
+    type CommandBuffer: WasmNotSendSync + fmt::Debug;
 
-    type Buffer: fmt::Debug + WasmNotSend + WasmNotSync + 'static;
-    type Texture: fmt::Debug + WasmNotSend + WasmNotSync + 'static;
-    type SurfaceTexture: fmt::Debug + WasmNotSend + WasmNotSync + Borrow<Self::Texture>;
-    type TextureView: fmt::Debug + WasmNotSend + WasmNotSync;
-    type Sampler: fmt::Debug + WasmNotSend + WasmNotSync;
-    type QuerySet: fmt::Debug + WasmNotSend + WasmNotSync;
-    type Fence: fmt::Debug + WasmNotSend + WasmNotSync;
+    type Buffer: fmt::Debug + WasmNotSendSync + 'static;
+    type Texture: fmt::Debug + WasmNotSendSync + 'static;
+    type SurfaceTexture: fmt::Debug + WasmNotSendSync + Borrow<Self::Texture>;
+    type TextureView: fmt::Debug + WasmNotSendSync;
+    type Sampler: fmt::Debug + WasmNotSendSync;
+    type QuerySet: fmt::Debug + WasmNotSendSync;
+    type Fence: fmt::Debug + WasmNotSendSync;
 
-    type BindGroupLayout: fmt::Debug + WasmNotSend + WasmNotSync;
-    type BindGroup: fmt::Debug + WasmNotSend + WasmNotSync;
-    type PipelineLayout: WasmNotSend + WasmNotSync;
-    type ShaderModule: fmt::Debug + WasmNotSend + WasmNotSync;
-    type RenderPipeline: WasmNotSend + WasmNotSync;
-    type ComputePipeline: WasmNotSend + WasmNotSync;
+    type BindGroupLayout: fmt::Debug + WasmNotSendSync;
+    type BindGroup: fmt::Debug + WasmNotSendSync;
+    type PipelineLayout: fmt::Debug + WasmNotSendSync;
+    type ShaderModule: fmt::Debug + WasmNotSendSync;
+    type RenderPipeline: fmt::Debug + WasmNotSendSync;
+    type ComputePipeline: fmt::Debug + WasmNotSendSync;
 }
 
-pub trait Instance<A: Api>: Sized + WasmNotSend + WasmNotSync {
+pub trait Instance<A: Api>: Sized + WasmNotSendSync {
     unsafe fn init(desc: &InstanceDescriptor) -> Result<Self, InstanceError>;
     unsafe fn create_surface(
         &self,
@@ -229,7 +229,7 @@ pub trait Instance<A: Api>: Sized + WasmNotSend + WasmNotSync {
     unsafe fn enumerate_adapters(&self) -> Vec<ExposedAdapter<A>>;
 }
 
-pub trait Surface<A: Api>: WasmNotSend + WasmNotSync {
+pub trait Surface<A: Api>: WasmNotSendSync {
     /// Configures the surface to use the given device.
     ///
     /// # Safety
@@ -239,7 +239,7 @@ pub trait Surface<A: Api>: WasmNotSend + WasmNotSync {
     /// - All [`Api::TextureView`]s derived from the [`AcquiredSurfaceTexture`]s must have been destroyed.
     /// - All surfaces created using other devices must have been unconfigured before this call.
     unsafe fn configure(
-        &mut self,
+        &self,
         device: &A::Device,
         config: &SurfaceConfiguration,
     ) -> Result<(), SurfaceError>;
@@ -252,7 +252,7 @@ pub trait Surface<A: Api>: WasmNotSend + WasmNotSync {
     /// - All [`AcquiredSurfaceTexture`]s must have been destroyed.
     /// - All [`Api::TextureView`]s derived from the [`AcquiredSurfaceTexture`]s must have been destroyed.
     /// - The surface must have been configured on the given device.
-    unsafe fn unconfigure(&mut self, device: &A::Device);
+    unsafe fn unconfigure(&self, device: &A::Device);
 
     /// Returns the next texture to be presented by the swapchain for drawing
     ///
@@ -265,13 +265,13 @@ pub trait Surface<A: Api>: WasmNotSend + WasmNotSync {
     ///
     /// Returns `None` on timing out.
     unsafe fn acquire_texture(
-        &mut self,
+        &self,
         timeout: Option<std::time::Duration>,
     ) -> Result<Option<AcquiredSurfaceTexture<A>>, SurfaceError>;
 
-    unsafe fn discard_texture(&mut self, texture: A::SurfaceTexture);
+    unsafe fn discard_texture(&self, texture: A::SurfaceTexture);
 }
 
-pub trait Adapter<A: Api>: WasmNotSend + WasmNotSync {
+pub trait Adapter<A: Api>: WasmNotSendSync {
     unsafe fn open(
         &self,
         features: wgt::Features,
@@ -295,7 +295,7 @@ pub trait Adapter<A: Api>: WasmNotSend + WasmNotSync {
     unsafe fn get_presentation_timestamp(&self) -> wgt::PresentationTimestamp;
 }
 
-pub trait Device<A: Api>: WasmNotSend + WasmNotSync {
+pub trait Device<A: Api>: WasmNotSendSync {
     /// Exit connection to this logical device.
     unsafe fn exit(self, queue: A::Queue);
     /// Creates a new buffer.
@@ -391,7 +391,7 @@ pub trait Device<A: Api>: WasmNotSend + WasmNotSync {
     unsafe fn stop_capture(&self);
 }
 
-pub trait Queue<A: Api>: WasmNotSend + WasmNotSync {
+pub trait Queue<A: Api>: WasmNotSendSync {
     /// Submits the command buffers for execution on GPU.
     ///
     /// Valid usage:
@@ -399,13 +399,13 @@ pub trait Queue<A: Api>: WasmNotSend + WasmNotSync {
     ///   that are associated with this queue.
     /// - all of the command buffers had `CommandBuffer::finish()` called.
     unsafe fn submit(
-        &mut self,
+        &self,
         command_buffers: &[&A::CommandBuffer],
         signal_fence: Option<(&mut A::Fence, FenceValue)>,
     ) -> Result<(), DeviceError>;
     unsafe fn present(
-        &mut self,
-        surface: &mut A::Surface,
+        &self,
+        surface: &A::Surface,
         texture: A::SurfaceTexture,
     ) -> Result<(), SurfaceError>;
     unsafe fn get_timestamp_period(&self) -> f32;
@@ -415,7 +415,7 @@ pub trait Queue<A: Api>: WasmNotSend + WasmNotSync {
 /// Serves as a parent for all the encoded command buffers.
 /// Works in bursts of action: one or more command buffers are recorded,
 /// then submitted to a queue, and then it needs to be `reset_all()`.
-pub trait CommandEncoder<A: Api>: WasmNotSend + WasmNotSync + fmt::Debug {
+pub trait CommandEncoder<A: Api>: WasmNotSendSync + fmt::Debug {
     /// Begin encoding a new command buffer.
     unsafe fn begin_encoding(&mut self, label: Label) -> Result<(), DeviceError>;
     /// Discard currently recorded list, if any.
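Note: `WasmNotSendSync` (added in #4702, defined in wgpu-types at the end of this patch) is a supertrait-plus-blanket-impl alias that folds the former `WasmNotSend + WasmNotSync` pairs into a single bound. A self-contained sketch of the pattern follows; it is not the wgpu code, and the stand-ins below tie directly to `Send`/`Sync`, whereas the real traits conditionally drop those bounds on wasm32:

    trait WasmNotSend: Send {}
    impl<T: Send> WasmNotSend for T {}

    trait WasmNotSync: Sync {}
    impl<T: Sync> WasmNotSync for T {}

    // One name for the pair, via supertrait + blanket impl:
    trait WasmNotSendSync: WasmNotSend + WasmNotSync {}
    impl<T: WasmNotSend + WasmNotSync> WasmNotSendSync for T {}

    fn assert_impl<T: WasmNotSendSync>() {}

    fn main() {
        assert_impl::<Vec<u8>>(); // compiles: Vec<u8> is Send + Sync
    }

The blanket impl means no type ever opts in manually; any `Send + Sync` type satisfies the alias automatically, which is why the hunk above can swap the bounds mechanically.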
diff --git a/third_party/rust/wgpu-hal/src/metal/mod.rs b/third_party/rust/wgpu-hal/src/metal/mod.rs
index 29439ee0ca9b9..360c648daf69e 100644
--- a/third_party/rust/wgpu-hal/src/metal/mod.rs
+++ b/third_party/rust/wgpu-hal/src/metal/mod.rs
@@ -35,7 +35,7 @@ use std::{
 use arrayvec::ArrayVec;
 use bitflags::bitflags;
 use metal::foreign_types::ForeignTypeRef as _;
-use parking_lot::Mutex;
+use parking_lot::{Mutex, RwLock};
 
 #[derive(Clone, Debug)]
 pub struct Api;
@@ -334,8 +334,8 @@ pub struct Device {
 pub struct Surface {
     view: Option<NonNull<Object>>,
     render_layer: Mutex<metal::MetalLayer>,
-    swapchain_format: Option<wgt::TextureFormat>,
-    extent: wgt::Extent3d,
+    swapchain_format: RwLock<Option<wgt::TextureFormat>>,
+    extent: RwLock<wgt::Extent3d>,
     main_thread_id: thread::ThreadId,
     // Useful for UI-intensive applications that are sensitive to
     // window resizing.
@@ -363,7 +363,7 @@ unsafe impl Sync for SurfaceTexture {}
 
 impl crate::Queue<Api> for Queue {
     unsafe fn submit(
-        &mut self,
+        &self,
         command_buffers: &[&CommandBuffer],
         signal_fence: Option<(&mut Fence, crate::FenceValue)>,
     ) -> Result<(), crate::DeviceError> {
@@ -410,8 +410,8 @@ impl crate::Queue<Api> for Queue {
         Ok(())
     }
     unsafe fn present(
-        &mut self,
-        _surface: &mut Surface,
+        &self,
+        _surface: &Surface,
         texture: SurfaceTexture,
     ) -> Result<(), crate::SurfaceError> {
         let queue = &self.raw.lock();
@@ -695,6 +695,7 @@ impl PipelineStageInfo {
     }
 }
 
+#[derive(Debug)]
 pub struct RenderPipeline {
     raw: metal::RenderPipelineState,
     #[allow(dead_code)]
@@ -714,6 +715,7 @@ pub struct RenderPipeline {
 unsafe impl Send for RenderPipeline {}
 unsafe impl Sync for RenderPipeline {}
 
+#[derive(Debug)]
 pub struct ComputePipeline {
     raw: metal::ComputePipelineState,
     #[allow(dead_code)]
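Note: on Metal the swapchain format and extent become `RwLock`-wrapped `Copy` data: `configure` takes the write lock while `acquire_texture` only needs reads. A sketch of that split under the same assumptions (placeholder types, `parking_lot` crate), not the wgpu code:

    use parking_lot::RwLock;

    #[derive(Clone, Copy, Debug, Default)]
    struct Extent3d {
        width: u32,
        height: u32,
    }

    struct Surface {
        swapchain_format: RwLock<Option<u32>>, // stand-in for wgt::TextureFormat
        extent: RwLock<Extent3d>,
    }

    impl Surface {
        // Writer side: reconfiguration replaces both fields.
        fn configure(&self, format: u32, extent: Extent3d) {
            *self.swapchain_format.write() = Some(format);
            *self.extent.write() = extent;
        }

        // Reader side: many acquires may run without contending on writes.
        fn acquire(&self) -> (u32, Extent3d) {
            // `Option<u32>` is Copy, so `.unwrap()` copies out of the guard.
            let format = self.swapchain_format.read().unwrap();
            let extent = *self.extent.read();
            (format, extent)
        }
    }

    fn main() {
        let s = Surface {
            swapchain_format: RwLock::new(None),
            extent: RwLock::new(Extent3d::default()),
        };
        s.configure(7, Extent3d { width: 800, height: 600 });
        assert_eq!(s.acquire().0, 7);
    }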
diff --git a/third_party/rust/wgpu-hal/src/metal/surface.rs b/third_party/rust/wgpu-hal/src/metal/surface.rs
index 3b25e5572940a..e54a176da595a 100644
--- a/third_party/rust/wgpu-hal/src/metal/surface.rs
+++ b/third_party/rust/wgpu-hal/src/metal/surface.rs
@@ -14,7 +14,7 @@ use objc::{
     runtime::{Class, Object, Sel, BOOL, NO, YES},
     sel, sel_impl,
 };
-use parking_lot::Mutex;
+use parking_lot::{Mutex, RwLock};
 
 #[cfg(target_os = "macos")]
 #[cfg_attr(feature = "link", link(name = "QuartzCore", kind = "framework"))]
@@ -63,8 +63,8 @@ impl super::Surface {
         Self {
             view,
             render_layer: Mutex::new(layer),
-            swapchain_format: None,
-            extent: wgt::Extent3d::default(),
+            swapchain_format: RwLock::new(None),
+            extent: RwLock::new(wgt::Extent3d::default()),
             main_thread_id: thread::current().id(),
             present_with_transaction: false,
         }
@@ -171,15 +171,15 @@ impl super::Surface {
 
 impl crate::Surface<super::Api> for super::Surface {
     unsafe fn configure(
-        &mut self,
+        &self,
         device: &super::Device,
         config: &crate::SurfaceConfiguration,
     ) -> Result<(), crate::SurfaceError> {
-        log::info!("build swapchain {:?}", config);
+        log::debug!("build swapchain {:?}", config);
 
         let caps = &device.shared.private_caps;
-        self.swapchain_format = Some(config.format);
-        self.extent = config.extent;
+        *self.swapchain_format.write() = Some(config.format);
+        *self.extent.write() = config.extent;
 
         let render_layer = self.render_layer.lock();
         let framebuffer_only = config.usage == crate::TextureUses::COLOR_TARGET;
@@ -233,12 +233,12 @@ impl crate::Surface<super::Api> for super::Surface {
         Ok(())
     }
 
-    unsafe fn unconfigure(&mut self, _device: &super::Device) {
-        self.swapchain_format = None;
+    unsafe fn unconfigure(&self, _device: &super::Device) {
+        *self.swapchain_format.write() = None;
     }
 
     unsafe fn acquire_texture(
-        &mut self,
+        &self,
         _timeout_ms: Option<std::time::Duration>, //TODO
     ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
         let render_layer = self.render_layer.lock();
@@ -251,16 +251,18 @@ impl crate::Surface<super::Api> for super::Surface {
             None => return Ok(None),
         };
 
+        let swapchain_format = self.swapchain_format.read().unwrap();
+        let extent = self.extent.read();
         let suf_texture = super::SurfaceTexture {
             texture: super::Texture {
                 raw: texture,
-                format: self.swapchain_format.unwrap(),
+                format: swapchain_format,
                 raw_type: metal::MTLTextureType::D2,
                 array_layers: 1,
                 mip_levels: 1,
                 copy_size: crate::CopyExtent {
-                    width: self.extent.width,
-                    height: self.extent.height,
+                    width: extent.width,
+                    height: extent.height,
                     depth: 1,
                 },
             },
@@ -274,5 +276,5 @@ impl crate::Surface<super::Api> for super::Surface {
         }))
     }
 
-    unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {}
+    unsafe fn discard_texture(&self, _texture: super::SurfaceTexture) {}
 }
diff --git a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
index fd62473fd7497..6fcb7b7df6dd2 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
@@ -3,7 +3,11 @@ use super::conv;
 
 use ash::{extensions::khr, vk};
 use parking_lot::Mutex;
-use std::{collections::BTreeMap, ffi::CStr, sync::Arc};
+use std::{
+    collections::BTreeMap,
+    ffi::CStr,
+    sync::{atomic::AtomicIsize, Arc},
+};
 
 fn depth_stencil_required_flags() -> vk::FormatFeatureFlags {
     vk::FormatFeatureFlags::SAMPLED_IMAGE | vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT
@@ -986,6 +990,15 @@ impl super::Instance {
             );
         };
 
+        if let Some(driver) = phd_capabilities.driver {
+            if driver.conformance_version.major == 0 {
+                log::warn!(
+                    "Adapter is not Vulkan compliant, hiding adapter: {}",
+                    info.name
+                );
+                return None;
+            }
+        }
         if phd_capabilities.device_api_version == vk::API_VERSION_1_0
             && !phd_capabilities.supports_extension(vk::KhrStorageBufferStorageClassFn::name())
         {
@@ -1351,7 +1364,7 @@ impl super::Adapter {
             device: Arc::clone(&shared),
             family_index,
             relay_semaphores,
-            relay_index: None,
+            relay_index: AtomicIsize::new(-1),
        };
 
         let mem_allocator = {
diff --git a/third_party/rust/wgpu-hal/src/vulkan/device.rs b/third_party/rust/wgpu-hal/src/vulkan/device.rs
index 8eb2935a32b1f..5f7fbac47af32 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/device.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/device.rs
@@ -539,7 +539,7 @@ struct CompiledStage {
 impl super::Device {
     pub(super) unsafe fn create_swapchain(
         &self,
-        surface: &mut super::Surface,
+        surface: &super::Surface,
         config: &crate::SurfaceConfiguration,
         provided_old_swapchain: Option<super::Swapchain>,
     ) -> Result<super::Swapchain, crate::SurfaceError> {
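Note: the adapter.rs hunk above (#4718) hides adapters whose driver reports a conformance major version of 0, meaning the driver never passed the Vulkan conformance test suite. An illustrative reconstruction follows, not the wgpu implementation: the types are stand-ins for ash's driver-properties structs, and the real code logs through `log::warn!` rather than `eprintln!`:

    #[derive(Clone, Copy)]
    struct ConformanceVersion {
        major: u8,
    }

    struct AdapterInfo {
        name: String,
        conformance_version: Option<ConformanceVersion>,
    }

    // Returns None to hide the adapter, mirroring the early-out above.
    fn filter_adapter(info: AdapterInfo) -> Option<AdapterInfo> {
        if let Some(conformance) = info.conformance_version {
            // major == 0 means "never passed the Vulkan CTS": do not expose it.
            if conformance.major == 0 {
                eprintln!("Adapter is not Vulkan compliant, hiding adapter: {}", info.name);
                return None;
            }
        }
        Some(info)
    }

    fn main() {
        let bad = AdapterInfo {
            name: "non-conformant".into(),
            conformance_version: Some(ConformanceVersion { major: 0 }),
        };
        assert!(filter_adapter(bad).is_none());
    }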
diff --git a/third_party/rust/wgpu-hal/src/vulkan/instance.rs b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
index da9eaea8b27c8..a72c6e848b075 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/instance.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
@@ -9,6 +9,7 @@ use ash::{
     extensions::{ext, khr},
     vk,
 };
+use parking_lot::RwLock;
 
 unsafe extern "system" fn debug_utils_messenger_callback(
     message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
@@ -285,7 +286,7 @@ impl super::Instance {
         }) {
             true
         } else {
-            log::info!("Unable to find extension: {}", ext.to_string_lossy());
+            log::warn!("Unable to find extension: {}", ext.to_string_lossy());
             false
         }
     });
@@ -314,7 +315,7 @@ impl super::Instance {
         has_nv_optimus: bool,
         drop_guard: Option<crate::DropGuard>,
     ) -> Result<Self, crate::InstanceError> {
-        log::info!("Instance version: 0x{:x}", instance_api_version);
+        log::debug!("Instance version: 0x{:x}", instance_api_version);
 
         let debug_utils = if let Some(debug_utils_create_info) = debug_utils_create_info {
             if extensions.contains(&ext::DebugUtils::name()) {
@@ -344,7 +345,7 @@ impl super::Instance {
 
         let get_physical_device_properties =
             if extensions.contains(&khr::GetPhysicalDeviceProperties2::name()) {
-                log::info!("Enabling device properties2");
+                log::debug!("Enabling device properties2");
                 Some(khr::GetPhysicalDeviceProperties2::new(
                     &entry,
                     &raw_instance,
@@ -539,7 +540,7 @@ impl super::Instance {
             raw: surface,
             functor,
             instance: Arc::clone(&self.shared),
-            swapchain: None,
+            swapchain: RwLock::new(None),
         }
     }
 }
@@ -618,7 +619,7 @@ impl crate::Instance<super::Api> for super::Instance {
             entry.enumerate_instance_layer_properties()
         };
         let instance_layers = instance_layers.map_err(|e| {
-            log::info!("enumerate_instance_layer_properties: {:?}", e);
+            log::debug!("enumerate_instance_layer_properties: {:?}", e);
             crate::InstanceError::with_source(
                 String::from("enumerate_instance_layer_properties() failed"),
                 e,
@@ -877,24 +878,24 @@ impl crate::Instance<super::Api> for super::Instance {
 
 impl crate::Surface<super::Api> for super::Surface {
     unsafe fn configure(
-        &mut self,
+        &self,
         device: &super::Device,
         config: &crate::SurfaceConfiguration,
     ) -> Result<(), crate::SurfaceError> {
         // Safety: `configure`'s contract guarantees there are no resources derived from the swapchain in use.
-        let old = self
-            .swapchain
+        let mut swap_chain = self.swapchain.write();
+        let old = swap_chain
             .take()
             .map(|sc| unsafe { sc.release_resources(&device.shared.raw) });
 
         let swapchain = unsafe { device.create_swapchain(self, config, old)? };
-        self.swapchain = Some(swapchain);
+        *swap_chain = Some(swapchain);
 
         Ok(())
     }
 
-    unsafe fn unconfigure(&mut self, device: &super::Device) {
-        if let Some(sc) = self.swapchain.take() {
+    unsafe fn unconfigure(&self, device: &super::Device) {
+        if let Some(sc) = self.swapchain.write().take() {
             // Safety: `unconfigure`'s contract guarantees there are no resources derived from the swapchain in use.
            let swapchain = unsafe { sc.release_resources(&device.shared.raw) };
             unsafe { swapchain.functor.destroy_swapchain(swapchain.raw, None) };
@@ -902,10 +903,11 @@ impl crate::Surface<super::Api> for super::Surface {
     }
 
     unsafe fn acquire_texture(
-        &mut self,
+        &self,
         timeout: Option<std::time::Duration>,
     ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
-        let sc = self.swapchain.as_mut().unwrap();
+        let mut swapchain = self.swapchain.write();
+        let sc = swapchain.as_mut().unwrap();
 
         let mut timeout_ns = match timeout {
             Some(duration) => duration.as_nanos() as u64,
@@ -992,5 +994,5 @@ impl crate::Surface<super::Api> for super::Surface {
         }))
     }
 
-    unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {}
+    unsafe fn discard_texture(&self, _texture: super::SurfaceTexture) {}
 }
diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
index a0f7123552ec9..843e4ef36fd15 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/mod.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
@@ -31,14 +31,23 @@ mod conv;
 mod device;
 mod instance;
 
-use std::{borrow::Borrow, ffi::CStr, fmt, num::NonZeroU32, sync::Arc};
+use std::{
+    borrow::Borrow,
+    ffi::CStr,
+    fmt,
+    num::NonZeroU32,
+    sync::{
+        atomic::{AtomicIsize, Ordering},
+        Arc,
+    },
+};
 
 use arrayvec::ArrayVec;
 use ash::{
     extensions::{ext, khr},
     vk,
 };
-use parking_lot::Mutex;
+use parking_lot::{Mutex, RwLock};
 
 const MILLIS_TO_NANOS: u64 = 1_000_000;
 const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1;
@@ -146,7 +155,7 @@ pub struct Surface {
     raw: vk::SurfaceKHR,
     functor: khr::Surface,
     instance: Arc<InstanceShared>,
-    swapchain: Option<Swapchain>,
+    swapchain: RwLock<Option<Swapchain>>,
 }
 
 #[derive(Debug)]
@@ -340,7 +349,7 @@ pub struct Queue {
     /// It would be correct to use a single semaphore there, but
     /// [Intel hangs in `anv_queue_finish`](https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508).
    relay_semaphores: [vk::Semaphore; 2],
-    relay_index: Option<usize>,
+    relay_index: AtomicIsize,
 }
 
 #[derive(Debug)]
@@ -560,7 +569,7 @@ impl Fence {
 
 impl crate::Queue<Api> for Queue {
     unsafe fn submit(
-        &mut self,
+        &self,
         command_buffers: &[&CommandBuffer],
         signal_fence: Option<(&mut Fence, crate::FenceValue)>,
     ) -> Result<(), crate::DeviceError> {
@@ -605,16 +614,17 @@ impl crate::Queue<Api> for Queue {
         }
         let wait_stage_mask = [vk::PipelineStageFlags::TOP_OF_PIPE];
-        let sem_index = match self.relay_index {
-            Some(old_index) => {
-                vk_info = vk_info
-                    .wait_semaphores(&self.relay_semaphores[old_index..old_index + 1])
-                    .wait_dst_stage_mask(&wait_stage_mask);
-                (old_index + 1) % self.relay_semaphores.len()
-            }
-            None => 0,
+        let old_index = self.relay_index.load(Ordering::Relaxed);
+        let sem_index = if old_index >= 0 {
+            vk_info = vk_info
+                .wait_semaphores(&self.relay_semaphores[old_index as usize..old_index as usize + 1])
+                .wait_dst_stage_mask(&wait_stage_mask);
+            (old_index as usize + 1) % self.relay_semaphores.len()
+        } else {
+            0
         };
-        self.relay_index = Some(sem_index);
+        self.relay_index
+            .store(sem_index as isize, Ordering::Relaxed);
         signal_semaphores[0] = self.relay_semaphores[sem_index];
 
         let signal_count = if signal_semaphores[1] == vk::Semaphore::null() {
@@ -634,11 +644,12 @@ impl crate::Queue<Api> for Queue {
     }
 
     unsafe fn present(
-        &mut self,
-        surface: &mut Surface,
+        &self,
+        surface: &Surface,
         texture: SurfaceTexture,
     ) -> Result<(), crate::SurfaceError> {
-        let ssc = surface.swapchain.as_ref().unwrap();
+        let mut swapchain = surface.swapchain.write();
+        let ssc = swapchain.as_mut().unwrap();
         let swapchains = [ssc.raw];
         let image_indices = [texture.index];
 
@@ -646,8 +657,11 @@ impl crate::Queue<Api> for Queue {
             .swapchains(&swapchains)
             .image_indices(&image_indices);
 
-        if let Some(old_index) = self.relay_index.take() {
-            vk_info = vk_info.wait_semaphores(&self.relay_semaphores[old_index..old_index + 1]);
+        let old_index = self.relay_index.swap(-1, Ordering::Relaxed);
+        if old_index >= 0 {
+            vk_info = vk_info.wait_semaphores(
+                &self.relay_semaphores[old_index as usize..old_index as usize + 1],
+            );
         }
 
         let suboptimal = {
diff --git a/third_party/rust/wgpu-types/.cargo-checksum.json b/third_party/rust/wgpu-types/.cargo-checksum.json
index caaa83a60df80..3a6107dedaaba 100644
--- a/third_party/rust/wgpu-types/.cargo-checksum.json
+++ b/third_party/rust/wgpu-types/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"e45ee369c8f91526056ba7f46504e270da1d8b2de65431bbaf36426bcd54da68","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"4e1ef56078799af2c15ca573a05b5deebbb084b447a5840cd61971afa4c73924","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
\ No newline at end of file
+{"files":{"Cargo.toml":"e45ee369c8f91526056ba7f46504e270da1d8b2de65431bbaf36426bcd54da68","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"6c3886f27653eee4359a8d150a6091a7d6e0ff1abab44c9afff2d54a0d0a474b","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
\ No newline at end of file
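Note: with `submit`/`present` taking `&self`, the queue's `relay_index: Option<usize>` becomes an `AtomicIsize` whose value -1 encodes `None`, as the vulkan/mod.rs hunks above show. A standalone sketch of that sentinel encoding, not the wgpu implementation:

    use std::sync::atomic::{AtomicIsize, Ordering};

    struct Relay {
        index: AtomicIsize, // -1 encodes "no semaphore in flight"
    }

    impl Relay {
        fn new() -> Self {
            Relay { index: AtomicIsize::new(-1) }
        }

        // Like submit(): read the previous index, then advance it
        // (two relay semaphores, so modulo 2).
        fn advance(&self) -> usize {
            let old = self.index.load(Ordering::Relaxed);
            let next = if old >= 0 { (old as usize + 1) % 2 } else { 0 };
            self.index.store(next as isize, Ordering::Relaxed);
            next
        }

        // Like present(): take the index, leaving "None" behind.
        fn take(&self) -> Option<usize> {
            let old = self.index.swap(-1, Ordering::Relaxed);
            (old >= 0).then(|| old as usize)
        }
    }

    fn main() {
        let relay = Relay::new();
        assert_eq!(relay.advance(), 0);
        assert_eq!(relay.advance(), 1);
        assert_eq!(relay.take(), Some(1));
        assert_eq!(relay.take(), None);
    }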
diff --git a/third_party/rust/wgpu-types/src/lib.rs b/third_party/rust/wgpu-types/src/lib.rs
index 81a9c1eca5ad4..56a42b4ba7f48 100644
--- a/third_party/rust/wgpu-types/src/lib.rs
+++ b/third_party/rust/wgpu-types/src/lib.rs
@@ -6663,6 +6663,8 @@ pub use send_sync::*;
 
 #[doc(hidden)]
 mod send_sync {
+    pub trait WasmNotSendSync: WasmNotSend + WasmNotSync {}
+    impl<T: WasmNotSend + WasmNotSync> WasmNotSendSync for T {}
     #[cfg(any(
         not(target_arch = "wasm32"),
         all(