From 4c29cc8935965b059e99959848e71b4bb383c3fb Mon Sep 17 00:00:00 2001 From: Jim Blandy Date: Fri, 13 Sep 2024 05:48:59 +0000 Subject: [PATCH] Bug 1917102: Update wgpu crates to c2e0ad293 (2024-09-12). r=supply-chain-reviewers,webgpu-reviewers,nical,ErichDonGubler Demote some tests to backlog (filed as https://github.com/gfx-rs/wgpu/issues/6232): - webgpu:shader,validation,const_assert,const_assert:constant_expression_assert:* - webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_assert:* - webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_assert:* Differential Revision: https://phabricator.services.mozilla.com/D221272 --- .cargo/config.toml.in | 4 +- Cargo.lock | 8 +- dom/webgpu/Utility.cpp | 2 +- gfx/wgpu_bindings/Cargo.toml | 12 +- gfx/wgpu_bindings/cbindgen.toml | 3 +- gfx/wgpu_bindings/moz.yaml | 4 +- gfx/wgpu_bindings/src/command.rs | 4 +- gfx/wgpu_bindings/src/error.rs | 43 +- gfx/wgpu_bindings/src/server.rs | 53 +- supply-chain/audits.toml | 16 +- .../const_assert/cts.https.html.ini | 9 + third_party/rust/naga/.cargo-checksum.json | 2 +- third_party/rust/naga/Cargo.toml | 5 +- .../rust/naga/src/back/glsl/features.rs | 2 +- third_party/rust/naga/src/back/glsl/mod.rs | 19 +- third_party/rust/naga/src/back/hlsl/conv.rs | 4 +- third_party/rust/naga/src/back/hlsl/writer.rs | 4 +- third_party/rust/naga/src/back/mod.rs | 4 + third_party/rust/naga/src/back/msl/mod.rs | 1 + .../rust/naga/src/back/pipeline_constants.rs | 1 + .../rust/naga/src/back/spv/instructions.rs | 2 +- third_party/rust/naga/src/back/spv/writer.rs | 7 +- third_party/rust/naga/src/back/wgsl/writer.rs | 16 +- .../rust/naga/src/front/glsl/parser/types.rs | 2 +- .../rust/naga/src/front/spv/convert.rs | 2 +- third_party/rust/naga/src/front/wgsl/error.rs | 18 + third_party/rust/naga/src/front/wgsl/index.rs | 43 +- .../naga/src/front/wgsl/lower/construction.rs | 30 +- .../rust/naga/src/front/wgsl/lower/mod.rs | 316 +++++-- 
.../rust/naga/src/front/wgsl/parse/ast.rs | 29 +- .../rust/naga/src/front/wgsl/parse/conv.rs | 4 +- .../rust/naga/src/front/wgsl/parse/mod.rs | 314 ++++--- .../rust/naga/src/front/wgsl/to_wgsl.rs | 2 +- third_party/rust/naga/src/lib.rs | 9 +- .../rust/naga/src/proc/constant_evaluator.rs | 120 ++- third_party/rust/naga/src/proc/mod.rs | 15 +- third_party/rust/naga/src/valid/interface.rs | 30 + .../rust/wgpu-core/.cargo-checksum.json | 2 +- third_party/rust/wgpu-core/Cargo.toml | 2 +- .../rust/wgpu-core/src/binding_model.rs | 32 +- .../rust/wgpu-core/src/command/bundle.rs | 59 +- .../rust/wgpu-core/src/command/clear.rs | 52 +- .../rust/wgpu-core/src/command/compute.rs | 225 +++-- .../wgpu-core/src/command/compute_command.rs | 53 +- .../rust/wgpu-core/src/command/draw.rs | 11 - third_party/rust/wgpu-core/src/command/mod.rs | 344 ++++---- .../rust/wgpu-core/src/command/query.rs | 87 +- .../rust/wgpu-core/src/command/render.rs | 210 ++--- .../wgpu-core/src/command/render_command.rs | 459 +++++----- .../rust/wgpu-core/src/command/transfer.rs | 223 ++--- .../rust/wgpu-core/src/device/global.rs | 810 +++++++----------- third_party/rust/wgpu-core/src/device/mod.rs | 73 +- .../rust/wgpu-core/src/device/queue.rs | 324 ++++--- .../rust/wgpu-core/src/device/resource.rs | 191 ++--- third_party/rust/wgpu-core/src/global.rs | 4 +- third_party/rust/wgpu-core/src/hub.rs | 40 +- .../rust/wgpu-core/src/init_tracker/mod.rs | 41 + third_party/rust/wgpu-core/src/instance.rs | 196 ++--- .../rust/wgpu-core/src/lock/observing.rs | 20 +- .../rust/wgpu-core/src/lock/vanilla.rs | 9 + third_party/rust/wgpu-core/src/pipeline.rs | 24 +- third_party/rust/wgpu-core/src/present.rs | 35 +- third_party/rust/wgpu-core/src/registry.rs | 34 +- third_party/rust/wgpu-core/src/resource.rs | 115 ++- third_party/rust/wgpu-core/src/storage.rs | 107 +-- .../rust/wgpu-core/src/track/texture.rs | 3 +- third_party/rust/wgpu-core/src/validation.rs | 12 +- .../rust/wgpu-hal/.cargo-checksum.json | 2 +- 
third_party/rust/wgpu-hal/Cargo.toml | 16 +- .../rust/wgpu-hal/examples/halmark/main.rs | 16 +- .../rust/wgpu-hal/examples/raw-gles.rs | 4 +- .../examples/ray-traced-triangle/main.rs | 13 +- .../rust/wgpu-hal/src/auxil/dxgi/conv.rs | 2 +- .../rust/wgpu-hal/src/auxil/dxgi/factory.rs | 223 +---- .../rust/wgpu-hal/src/auxil/dxgi/result.rs | 34 +- .../rust/wgpu-hal/src/auxil/dxgi/time.rs | 2 +- third_party/rust/wgpu-hal/src/dx12/adapter.rs | 54 +- third_party/rust/wgpu-hal/src/dx12/command.rs | 32 +- .../rust/wgpu-hal/src/dx12/descriptor.rs | 22 +- third_party/rust/wgpu-hal/src/dx12/device.rs | 59 +- .../rust/wgpu-hal/src/dx12/instance.rs | 70 +- third_party/rust/wgpu-hal/src/dx12/mod.rs | 218 +++-- .../wgpu-hal/src/dx12/shader_compilation.rs | 467 +++++----- .../rust/wgpu-hal/src/dx12/suballocation.rs | 56 +- .../rust/wgpu-hal/src/dynamic/command.rs | 10 +- .../rust/wgpu-hal/src/dynamic/device.rs | 12 + third_party/rust/wgpu-hal/src/empty.rs | 4 + third_party/rust/wgpu-hal/src/gles/adapter.rs | 12 +- third_party/rust/wgpu-hal/src/gles/conv.rs | 2 +- third_party/rust/wgpu-hal/src/gles/device.rs | 8 + third_party/rust/wgpu-hal/src/gles/egl.rs | 20 +- third_party/rust/wgpu-hal/src/gles/wgl.rs | 109 ++- third_party/rust/wgpu-hal/src/lib.rs | 199 ++++- .../rust/wgpu-hal/src/metal/adapter.rs | 8 +- third_party/rust/wgpu-hal/src/metal/device.rs | 8 + third_party/rust/wgpu-hal/src/metal/mod.rs | 21 +- .../rust/wgpu-hal/src/metal/surface.rs | 282 ++++-- .../rust/wgpu-hal/src/vulkan/adapter.rs | 49 +- third_party/rust/wgpu-hal/src/vulkan/conv.rs | 2 +- .../rust/wgpu-hal/src/vulkan/device.rs | 27 +- .../rust/wgpu-hal/src/vulkan/instance.rs | 18 +- third_party/rust/wgpu-hal/src/vulkan/mod.rs | 32 +- .../rust/wgpu-types/.cargo-checksum.json | 2 +- third_party/rust/wgpu-types/Cargo.toml | 2 +- third_party/rust/wgpu-types/src/lib.rs | 70 +- 105 files changed, 3769 insertions(+), 3374 deletions(-) diff --git a/.cargo/config.toml.in b/.cargo/config.toml.in index 
24138a2f79d4f..66ca22dd45d6c 100644 --- a/.cargo/config.toml.in +++ b/.cargo/config.toml.in @@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust" rev = "43c22248d136c8b38fe42ea709d08da6355cf04b" replace-with = "vendored-sources" -[source."git+https://github.com/gfx-rs/wgpu?rev=bbdbafdf8a947b563b46f632a778632b906d9eb4"] +[source."git+https://github.com/gfx-rs/wgpu?rev=c2e0ad293fc635c8695e8b2358610a5918f77695"] git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" replace-with = "vendored-sources" [source."git+https://github.com/hsivonen/any_all_workaround?rev=7fb1b7034c9f172aade21ee1c8554e8d8a48af80"] diff --git a/Cargo.lock b/Cargo.lock index a98a501161f89..f396ed80855dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4025,7 +4025,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664" [[package]] name = "naga" version = "22.0.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=bbdbafdf8a947b563b46f632a778632b906d9eb4#bbdbafdf8a947b563b46f632a778632b906d9eb4" +source = "git+https://github.com/gfx-rs/wgpu?rev=c2e0ad293fc635c8695e8b2358610a5918f77695#c2e0ad293fc635c8695e8b2358610a5918f77695" dependencies = [ "arrayvec", "bit-set", @@ -6795,7 +6795,7 @@ dependencies = [ [[package]] name = "wgpu-core" version = "22.0.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=bbdbafdf8a947b563b46f632a778632b906d9eb4#bbdbafdf8a947b563b46f632a778632b906d9eb4" +source = "git+https://github.com/gfx-rs/wgpu?rev=c2e0ad293fc635c8695e8b2358610a5918f77695#c2e0ad293fc635c8695e8b2358610a5918f77695" dependencies = [ "arrayvec", "bit-vec", @@ -6820,7 +6820,7 @@ dependencies = [ [[package]] name = "wgpu-hal" version = "22.0.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=bbdbafdf8a947b563b46f632a778632b906d9eb4#bbdbafdf8a947b563b46f632a778632b906d9eb4" +source = 
"git+https://github.com/gfx-rs/wgpu?rev=c2e0ad293fc635c8695e8b2358610a5918f77695#c2e0ad293fc635c8695e8b2358610a5918f77695" dependencies = [ "android_system_properties", "arrayvec", @@ -6859,7 +6859,7 @@ dependencies = [ [[package]] name = "wgpu-types" version = "22.0.0" -source = "git+https://github.com/gfx-rs/wgpu?rev=bbdbafdf8a947b563b46f632a778632b906d9eb4#bbdbafdf8a947b563b46f632a778632b906d9eb4" +source = "git+https://github.com/gfx-rs/wgpu?rev=c2e0ad293fc635c8695e8b2358610a5918f77695#c2e0ad293fc635c8695e8b2358610a5918f77695" dependencies = [ "bitflags 2.6.0", "js-sys", diff --git a/dom/webgpu/Utility.cpp b/dom/webgpu/Utility.cpp index a7b505451f062..0b19cef6dad43 100644 --- a/dom/webgpu/Utility.cpp +++ b/dom/webgpu/Utility.cpp @@ -139,7 +139,7 @@ ffi::WGPUTextureFormat ConvertTextureFormat( result.tag = ffi::WGPUTextureFormat_Rgb10a2Unorm; break; case dom::GPUTextureFormat::Rg11b10ufloat: - result.tag = ffi::WGPUTextureFormat_Rg11b10UFloat; + result.tag = ffi::WGPUTextureFormat_Rg11b10Ufloat; break; case dom::GPUTextureFormat::Rg32uint: result.tag = ffi::WGPUTextureFormat_Rg32Uint; diff --git a/gfx/wgpu_bindings/Cargo.toml b/gfx/wgpu_bindings/Cargo.toml index 7c66c2e7028a1..2c9cc041f27a7 100644 --- a/gfx/wgpu_bindings/Cargo.toml +++ b/gfx/wgpu_bindings/Cargo.toml @@ -17,7 +17,7 @@ default = [] [dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" # TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182 features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"] @@ -26,32 +26,32 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info" [target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = 
"c2e0ad293fc635c8695e8b2358610a5918f77695" features = ["metal"] # We want the wgpu-core Direct3D backends on Windows. [target.'cfg(windows)'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" features = ["dx12"] # We want the wgpu-core Vulkan backend on Linux and Windows. [target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc] package = "wgpu-core" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" features = ["vulkan"] [dependencies.wgt] package = "wgpu-types" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" [dependencies.wgh] package = "wgpu-hal" git = "https://github.com/gfx-rs/wgpu" -rev = "bbdbafdf8a947b563b46f632a778632b906d9eb4" +rev = "c2e0ad293fc635c8695e8b2358610a5918f77695" features = ["oom_panic", "device_lost_panic", "internal_error_panic"] [target.'cfg(windows)'.dependencies] diff --git a/gfx/wgpu_bindings/cbindgen.toml b/gfx/wgpu_bindings/cbindgen.toml index 5d2f3048dab31..bd752b527f2b2 100644 --- a/gfx/wgpu_bindings/cbindgen.toml +++ b/gfx/wgpu_bindings/cbindgen.toml @@ -25,6 +25,7 @@ typedef uint8_t WGPUOption_NonZeroU8; typedef uint64_t WGPUOption_AdapterId; typedef uint64_t WGPUOption_BufferId; typedef uint64_t WGPUOption_PipelineLayoutId; +typedef uint64_t WGPUOption_BindGroupId; typedef uint64_t WGPUOption_BindGroupLayoutId; typedef uint64_t WGPUOption_SamplerId; typedef uint64_t WGPUOption_SurfaceId; @@ -62,7 +63,7 @@ style = "tag" prefix = "WGPU" renaming_overrides_prefixing = true exclude = [ - "Option_AdapterId", "Option_BufferId", "Option_PipelineLayoutId", "Option_BindGroupLayoutId", + "Option_AdapterId", "Option_BufferId", "Option_BindGroupId", "Option_PipelineLayoutId", 
"Option_BindGroupLayoutId", "Option_SamplerId", "Option_SurfaceId", "Option_TextureViewId", "Option_BufferSize", "Option_NonZeroU32", "Option_NonZeroU8", "ANativeWindow_setBuffersGeometry", "Option_QuerySetId", diff --git a/gfx/wgpu_bindings/moz.yaml b/gfx/wgpu_bindings/moz.yaml index ea0f6f31d2b98..2f900ae58b937 100644 --- a/gfx/wgpu_bindings/moz.yaml +++ b/gfx/wgpu_bindings/moz.yaml @@ -20,11 +20,11 @@ origin: # Human-readable identifier for this version/release # Generally "version NNN", "tag SSS", "bookmark SSS" - release: bbdbafdf8a947b563b46f632a778632b906d9eb4 (2024-08-29T02:44:20Z). + release: c2e0ad293fc635c8695e8b2358610a5918f77695 (Thu Sep 12 15:03:04 2024 +0200) # Revision to pull in # Must be a long or short commit SHA (long preferred) - revision: bbdbafdf8a947b563b46f632a778632b906d9eb4 + revision: c2e0ad293fc635c8695e8b2358610a5918f77695 license: ['MIT', 'Apache-2.0'] diff --git a/gfx/wgpu_bindings/src/command.rs b/gfx/wgpu_bindings/src/command.rs index 374ca6d870550..99db55b52328d 100644 --- a/gfx/wgpu_bindings/src/command.rs +++ b/gfx/wgpu_bindings/src/command.rs @@ -774,7 +774,7 @@ pub fn replay_render_pass_impl( bind_group_id, } => { let offsets = dynamic_offsets(num_dynamic_offsets); - global.render_pass_set_bind_group(dst_pass, index, bind_group_id, offsets) + global.render_pass_set_bind_group(dst_pass, index, Some(bind_group_id), offsets) } RenderCommand::SetPipeline(pipeline_id) => { global.render_pass_set_pipeline(dst_pass, pipeline_id) @@ -966,7 +966,7 @@ fn replay_compute_pass_impl( bind_group_id, } => { let offsets = dynamic_offsets(num_dynamic_offsets); - global.compute_pass_set_bind_group(dst_pass, index, bind_group_id, offsets)?; + global.compute_pass_set_bind_group(dst_pass, index, Some(bind_group_id), offsets)?; } ComputeCommand::SetPipeline(pipeline_id) => { global.compute_pass_set_pipeline(dst_pass, pipeline_id)?; diff --git a/gfx/wgpu_bindings/src/error.rs b/gfx/wgpu_bindings/src/error.rs index 8cc02b2c90e84..58c3f069953c3 100644 
--- a/gfx/wgpu_bindings/src/error.rs +++ b/gfx/wgpu_bindings/src/error.rs @@ -172,9 +172,7 @@ mod foreign { impl HasErrorBufferType for RequestAdapterError { fn error_type(&self) -> ErrorBufferType { match self { - RequestAdapterError::NotFound | RequestAdapterError::InvalidSurface(_) => { - ErrorBufferType::Validation - } + RequestAdapterError::NotFound => ErrorBufferType::Validation, // N.B: forced non-exhaustiveness _ => ErrorBufferType::Validation, @@ -185,13 +183,8 @@ mod foreign { impl HasErrorBufferType for RequestDeviceError { fn error_type(&self) -> ErrorBufferType { match self { - RequestDeviceError::OutOfMemory => ErrorBufferType::OutOfMemory, - - RequestDeviceError::DeviceLost => ErrorBufferType::DeviceLost, - - RequestDeviceError::Internal - | RequestDeviceError::InvalidAdapter - | RequestDeviceError::NoGraphicsQueue => ErrorBufferType::Internal, + RequestDeviceError::Device(e) => e.error_type(), + RequestDeviceError::NoGraphicsQueue => ErrorBufferType::Internal, RequestDeviceError::UnsupportedFeature(_) | RequestDeviceError::LimitsExceeded(_) => ErrorBufferType::Validation, @@ -226,7 +219,7 @@ mod foreign { BufferAccessError::Device(e) => e.error_type(), BufferAccessError::Failed - | BufferAccessError::InvalidBufferId(_) + | BufferAccessError::InvalidResource(_) | BufferAccessError::DestroyedResource(_) | BufferAccessError::AlreadyMapped | BufferAccessError::MapAlreadyPending @@ -312,7 +305,7 @@ mod foreign { match self { CreatePipelineLayoutError::Device(e) => e.error_type(), - CreatePipelineLayoutError::InvalidBindGroupLayoutId(_) + CreatePipelineLayoutError::InvalidResource(_) | CreatePipelineLayoutError::MisalignedPushConstantRange { .. } | CreatePipelineLayoutError::MissingFeatures(_) | CreatePipelineLayoutError::MoreThanOnePushConstantRangePerStage { .. 
} @@ -331,10 +324,7 @@ mod foreign { match self { CreateBindGroupError::Device(e) => e.error_type(), - CreateBindGroupError::InvalidLayout - | CreateBindGroupError::InvalidBufferId(_) - | CreateBindGroupError::InvalidTextureViewId(_) - | CreateBindGroupError::InvalidSamplerId(_) + CreateBindGroupError::InvalidResource(_) | CreateBindGroupError::BindingArrayPartialLengthMismatch { .. } | CreateBindGroupError::BindingArrayLengthMismatch { .. } | CreateBindGroupError::BindingArrayZeroLength @@ -393,7 +383,7 @@ mod foreign { CreateComputePipelineError::Internal(_) => ErrorBufferType::Internal, - CreateComputePipelineError::InvalidLayout + CreateComputePipelineError::InvalidResource(_) | CreateComputePipelineError::Implicit(_) | CreateComputePipelineError::Stage(_) | CreateComputePipelineError::MissingDownlevelFlags(_) => { @@ -414,7 +404,7 @@ mod foreign { CreateRenderPipelineError::Internal { .. } => ErrorBufferType::Internal, CreateRenderPipelineError::ColorAttachment(_) - | CreateRenderPipelineError::InvalidLayout + | CreateRenderPipelineError::InvalidResource(_) | CreateRenderPipelineError::Implicit(_) | CreateRenderPipelineError::ColorState(_, _) | CreateRenderPipelineError::DepthStencilState(_) @@ -452,7 +442,6 @@ mod foreign { DeviceError::Invalid(_) | DeviceError::DeviceMismatch(_) => { ErrorBufferType::Validation } - DeviceError::InvalidDeviceId => ErrorBufferType::Validation, DeviceError::Lost => ErrorBufferType::DeviceLost, DeviceError::OutOfMemory => ErrorBufferType::OutOfMemory, DeviceError::ResourceCreationFailed => ErrorBufferType::Internal, @@ -466,8 +455,8 @@ mod foreign { match self { CreateTextureViewError::OutOfMemory => ErrorBufferType::OutOfMemory, - CreateTextureViewError::InvalidTextureId(_) - | CreateTextureViewError::InvalidTextureViewDimension { .. } + CreateTextureViewError::InvalidTextureViewDimension { .. 
} + | CreateTextureViewError::InvalidResource(_) | CreateTextureViewError::InvalidMultisampledTextureViewDimension(_) | CreateTextureViewError::InvalidCubemapTextureDepth { .. } | CreateTextureViewError::InvalidCubemapArrayTextureDepth { .. } @@ -493,6 +482,8 @@ mod foreign { CopyError::Encoder(e) => e.error_type(), CopyError::Transfer(e) => e.error_type(), + CopyError::InvalidResource(_) => ErrorBufferType::Validation, + // N.B: forced non-exhaustiveness _ => ErrorBufferType::Validation, } @@ -504,9 +495,7 @@ mod foreign { match self { TransferError::MemoryInitFailure(e) => e.error_type(), - TransferError::InvalidBufferId(_) - | TransferError::InvalidTextureId(_) - | TransferError::SameSourceDestinationBuffer + TransferError::SameSourceDestinationBuffer | TransferError::MissingRenderAttachmentUsageFlag(_) | TransferError::BufferOverrun { .. } | TransferError::TextureOverrun { .. } @@ -558,9 +547,7 @@ mod foreign { QueryError::Use(e) => e.error_type(), QueryError::Resolve(e) => e.error_type(), - QueryError::InvalidBufferId(_) | QueryError::InvalidQuerySetId(_) => { - ErrorBufferType::Validation - } + QueryError::InvalidResource(_) => ErrorBufferType::Validation, // N.B: forced non-exhaustiveness _ => ErrorBufferType::Validation, @@ -625,7 +612,7 @@ mod foreign { | QueueSubmitError::BufferStillMapped(_) | QueueSubmitError::SurfaceOutputDropped | QueueSubmitError::SurfaceUnconfigured - | QueueSubmitError::InvalidQueueId => ErrorBufferType::Validation, + | QueueSubmitError::InvalidResource(_) => ErrorBufferType::Validation, // N.B: forced non-exhaustiveness _ => ErrorBufferType::Validation, diff --git a/gfx/wgpu_bindings/src/server.rs b/gfx/wgpu_bindings/src/server.rs index ed5eed4c2a370..d760ddbd1b4a7 100644 --- a/gfx/wgpu_bindings/src/server.rs +++ b/gfx/wgpu_bindings/src/server.rs @@ -239,7 +239,7 @@ pub unsafe extern "C" fn wgpu_server_adapter_pack_info( driver, driver_info, backend, - } = global.adapter_get_info(id).unwrap(); + } = global.adapter_get_info(id); 
let is_hardware = match device_type { wgt::DeviceType::IntegratedGpu | wgt::DeviceType::DiscreteGpu => true, @@ -262,8 +262,8 @@ pub unsafe extern "C" fn wgpu_server_adapter_pack_info( let info = AdapterInformation { id, - limits: restrict_limits(global.adapter_limits(id).unwrap()), - features: global.adapter_features(id).unwrap(), + limits: restrict_limits(global.adapter_limits(id)), + features: global.adapter_features(id), name, vendor, device, @@ -310,14 +310,14 @@ pub unsafe extern "C" fn wgpu_server_adapter_request_device( // TODO: in https://github.com/gfx-rs/wgpu/pull/3626/files#diff-033343814319f5a6bd781494692ea626f06f6c3acc0753a12c867b53a646c34eR97 // which introduced the queue id parameter, the queue id is also the device id. I don't know how applicable this is to // other situations (this one in particular). - let (_, _, error) = global.adapter_request_device( + let res = global.adapter_request_device( self_id, &desc, trace_path, Some(new_device_id), Some(new_queue_id), ); - if let Some(err) = error { + if let Err(err) = res { error_buf.init(err); } } @@ -479,22 +479,23 @@ pub extern "C" fn wgpu_server_device_create_buffer( let label = utf8_label.as_ref().map(|s| Cow::from(&s[..])); let usage = wgt::BufferUsages::from_bits_retain(usage); + let desc = wgc::resource::BufferDescriptor { + label, + size, + usage, + mapped_at_creation, + }; + // Don't trust the graphics driver with buffer sizes larger than our conservative max texture size. 
if shm_allocation_failed || size > MAX_BUFFER_SIZE { error_buf.init(ErrMsg { message: "Out of memory", r#type: ErrorBufferType::OutOfMemory, }); - global.create_buffer_error(buffer_id.backend(), Some(buffer_id)); + global.create_buffer_error(buffer_id.backend(), Some(buffer_id), &desc); return; } - let desc = wgc::resource::BufferDescriptor { - label, - size, - usage, - mapped_at_creation, - }; let (_, error) = global.device_create_buffer(self_id, &desc, Some(buffer_id)); if let Some(err) = error { error_buf.init(err); @@ -569,7 +570,7 @@ pub extern "C" fn wgpu_server_buffer_unmap( // WebGPU spec. previously, but this doesn't seem formally specified now. :confused: // // TODO: upstream this; see . - BufferAccessError::InvalidBufferId(_) => (), + BufferAccessError::InvalidResource(_) => (), other => error_buf.init(other), } } @@ -675,7 +676,7 @@ impl Global { || desc.size.height > max || desc.size.depth_or_array_layers > max { - self.create_texture_error(id.backend(), Some(id)); + self.create_texture_error(id.backend(), Some(id), &desc); error_buf.init(ErrMsg { message: "Out of memory", r#type: ErrorBufferType::OutOfMemory, @@ -710,7 +711,7 @@ impl Global { ) }; if ret != true { - self.create_texture_error(id.backend(), Some(id)); + self.create_texture_error(id.backend(), Some(id), &desc); error_buf.init(ErrMsg { message: "Failed to create external texture", r#type: ErrorBufferType::Internal, @@ -728,7 +729,7 @@ impl Global { let handle = unsafe { wgpu_server_get_external_texture_handle(self.owner, id) }; if handle.is_null() { - self.create_texture_error(id.backend(), Some(id)); + self.create_texture_error(id.backend(), Some(id), &desc); error_buf.init(ErrMsg { message: "Failed to get external texture handle", r#type: ErrorBufferType::Internal, @@ -740,7 +741,7 @@ impl Global { dx12_device.OpenSharedHandle(Foundation::HANDLE(handle), &mut resource) }; if res.is_err() || resource.is_none() { - self.create_texture_error(id.backend(), Some(id)); + 
self.create_texture_error(id.backend(), Some(id), &desc); error_buf.init(ErrMsg { message: "Failed to open shared handle", r#type: ErrorBufferType::Internal, @@ -855,8 +856,12 @@ impl Global { error_buf.init(err); } } - DeviceAction::CreateRenderBundleError(buffer_id, _label) => { - self.create_render_bundle_error(buffer_id.backend(), Some(buffer_id)); + DeviceAction::CreateRenderBundleError(buffer_id, label) => { + self.create_render_bundle_error( + buffer_id.backend(), + Some(buffer_id), + &wgt::RenderBundleDescriptor { label }, + ); } DeviceAction::CreateCommandEncoder(id, desc) => { let (_, error) = self.device_create_command_encoder(self_id, &desc, Some(id)); @@ -1211,12 +1216,10 @@ pub unsafe extern "C" fn wgpu_server_on_submitted_work_done( self_id: id::QueueId, callback: wgc::device::queue::SubmittedWorkDoneClosureC, ) { - global - .queue_on_submitted_work_done( - self_id, - wgc::device::queue::SubmittedWorkDoneClosure::from_c(callback), - ) - .unwrap(); + global.queue_on_submitted_work_done( + self_id, + wgc::device::queue::SubmittedWorkDoneClosure::from_c(callback), + ); } /// # Safety diff --git a/supply-chain/audits.toml b/supply-chain/audits.toml index 5ee2c4452f2d3..0ff1f378a0ac6 100644 --- a/supply-chain/audits.toml +++ b/supply-chain/audits.toml @@ -3127,12 +3127,12 @@ delta = "0.20.0 -> 22.0.0" [[audits.naga]] who = [ - "Jim Blandy ", "Teodor Tanasoaia ", "Erich Gubler ", + "Jim Blandy ", ] criteria = "safe-to-deploy" -delta = "22.0.0 -> 22.0.0@git:bbdbafdf8a947b563b46f632a778632b906d9eb4" +delta = "22.0.0 -> 22.0.0@git:c2e0ad293fc635c8695e8b2358610a5918f77695" importable = false [[audits.net2]] @@ -5116,12 +5116,12 @@ delta = "0.20.0 -> 22.0.0" [[audits.wgpu-core]] who = [ - "Jim Blandy ", "Teodor Tanasoaia ", "Erich Gubler ", + "Jim Blandy ", ] criteria = "safe-to-deploy" -delta = "22.0.0 -> 22.0.0@git:bbdbafdf8a947b563b46f632a778632b906d9eb4" +delta = "22.0.0 -> 22.0.0@git:c2e0ad293fc635c8695e8b2358610a5918f77695" importable = false 
[[audits.wgpu-hal]] @@ -5189,12 +5189,12 @@ delta = "0.20.0 -> 22.0.0" [[audits.wgpu-hal]] who = [ - "Jim Blandy ", "Teodor Tanasoaia ", "Erich Gubler ", + "Jim Blandy ", ] criteria = "safe-to-deploy" -delta = "22.0.0 -> 22.0.0@git:bbdbafdf8a947b563b46f632a778632b906d9eb4" +delta = "22.0.0 -> 22.0.0@git:c2e0ad293fc635c8695e8b2358610a5918f77695" importable = false [[audits.wgpu-types]] @@ -5262,12 +5262,12 @@ delta = "0.20.0 -> 22.0.0" [[audits.wgpu-types]] who = [ - "Jim Blandy ", "Teodor Tanasoaia ", "Erich Gubler ", + "Jim Blandy ", ] criteria = "safe-to-deploy" -delta = "22.0.0 -> 22.0.0@git:bbdbafdf8a947b563b46f632a778632b906d9eb4" +delta = "22.0.0 -> 22.0.0@git:c2e0ad293fc635c8695e8b2358610a5918f77695" importable = false [[audits.whatsys]] diff --git a/testing/web-platform/mozilla/meta/webgpu/cts/webgpu/shader/validation/const_assert/const_assert/cts.https.html.ini b/testing/web-platform/mozilla/meta/webgpu/cts/webgpu/shader/validation/const_assert/const_assert/cts.https.html.ini index ab410551a7ab0..35e4be4041331 100644 --- a/testing/web-platform/mozilla/meta/webgpu/cts/webgpu/shader/validation/const_assert/const_assert/cts.https.html.ini +++ b/testing/web-platform/mozilla/meta/webgpu/cts/webgpu/shader/validation/const_assert/const_assert/cts.https.html.ini @@ -1,13 +1,19 @@ [cts.https.html?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_assert:*] + implementation-status: backlog [:scope="function"] + expected: FAIL [:scope="module"] + expected: FAIL [cts.https.html?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_assert:*] + implementation-status: backlog [:scope="function"] + expected: FAIL [:scope="module"] + expected: FAIL [cts.https.html?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_no_assert:*] @@ -20,9 +26,12 @@ [cts.https.html?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_assert:*] + implementation-status: backlog 
[:scope="function"] + expected: FAIL [:scope="module"] + expected: FAIL [cts.https.html?q=webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_no_assert:*] diff --git a/third_party/rust/naga/.cargo-checksum.json b/third_party/rust/naga/.cargo-checksum.json index 6f2a150260888..88438fc739387 100644 --- a/third_party/rust/naga/.cargo-checksum.json +++ b/third_party/rust/naga/.cargo-checksum.json @@ -1 +1 @@ -{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"fcace5fd54cbe1d42881e5d0148725a456c607d88534ee35bf07279a86d49c04","Cargo.toml":"fd146d9d0c99d78385df4b0bff58156ee0e4d1057c5f2f6dd32eff15cee7d9c3","README.md":"00a6070ebadb15a9488f117b286f50e4838a7dd0a28d41c9fcff4781c6b49171","build.rs":"824e90067aa04ad8726a49476b1af51f5c5c16f9a0b372f00398b3748d723662","src/arena/handle.rs":"1bf2edcca03d7cb466e1f8bd4bf1c966b805505ea2ee3641c39aa04811413478","src/arena/handle_set.rs":"f84ca7edafc907e282abea094c0c9b1da1cf4500e0c6389bb3ce2ef655f4c4ea","src/arena/handlevec.rs":"dfc9249478eb6980c13587c740d91234214bc7e0eef90e6f40b3d99decf70a58","src/arena/mod.rs":"14db142163133eb9ca582bc3665450f49cc4b1cfbda043b8b1518590cdda73be","src/arena/range.rs":"88683c36b137499ab537cf906505c2bd0871e0b9177e3a4e55da9db4e86df6a2","src/arena/unique_arena.rs":"22761770dbcbb6ac497ebdcf07173b3f07fc3c74bc0a70990d5fad882f4f471f","src/back/continue_forward.rs":"f95b810164db51fde368362624ce2569826b47dc3460df316b879abba48e5388","src/back/dot/mod.rs":"b9f57441a9491e4a53b6d9dcb253484f9e935419d179513c2d24db08be591fae","src/back/glsl/features.rs":"747285643e28d0ea7d8293a5ee802b4e88482f771c36845cefe7e8c16ef54637","src/back/glsl/keywords.rs":"b8883e5ee8a3d400fa44fef2baffe2853e72ff91e832499c128454d325ceccd9","src/back/glsl/mod.rs":"89e5aaeebaf7c897e98a3d929baecc8b65b447c2ce30497fbe505c650137c98d","src/back/hlsl/conv.rs":"764f62d356b4d5606e39f258156487db4b33907c82e0bd25d0e374cc999fae88","src/back/hlsl/help.rs":"de631bd11ee65fbbee
ae80733103789abfa6ff54b7b39b11e7c22cd02b486a1c","src/back/hlsl/keywords.rs":"a7164690a4da866e6bfb18ced20e32cc8c42dd7387e0e84addf0c2674f529cf5","src/back/hlsl/mod.rs":"8a88e0925ad62296349851f3f2f60959a63ae466422d1932bde53dd94eab586a","src/back/hlsl/storage.rs":"2c2a0071cafe487a398e396dddc85bdb319b1a5d74c097d529078e247a904359","src/back/hlsl/writer.rs":"323d32ceb58439a3bb29cbd420d77f03101b134dcfb22246b6050636704bc317","src/back/mod.rs":"9de236c7a5219b2ff371e44a7d97462cce3d1ccef4c7063632f1a6a7718d683d","src/back/msl/keywords.rs":"e6a4ef77363f995de1f8079c0b8591497cbf9520c5d3b2d41c7e1f483e8abd24","src/back/msl/mod.rs":"a03a380bbc8e9183e9f54c34e5326bedc6a2e94ecbf7cfc53190d2accafa6c7e","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"ad13b7f42046bdeef091ea793e8dc1d992153140abddb5f78a9629fca362ba97","src/back/pipeline_constants.rs":"50b3a87b6f4eb95c8b20dcc77cecf594cb0cf8d6c4fd7b315a6dd97271acda23","src/back/spv/block.rs":"056e2a11d27c41a1f042f3687b237f01d4a84df4756d62e5b7cc3cf4840f0db5","src/back/spv/helpers.rs":"bd666bf519a5d5561c2fab6ff78229739853ae6877d132911e71b57cfd656e42","src/back/spv/image.rs":"32b8f1ccdd54d80519ccf585451dcc6721d2a6a109d295a704e6c61857832556","src/back/spv/index.rs":"14f704d2d9409bbc9345d601725355048aa74471aaeb6aa781fc45a99ceaec38","src/back/spv/instructions.rs":"27ae0fb206b28cb8240b342a28396cb0a30e3a5fb50fa02413e0cfa6d507a5a4","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"91fa840f044ae40cc955db2278d5abcb03f53185452ba02ac0a78aedd37c24f3","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e41","src/back/spv/recyclable.rs":"8061e39ea5357c8d55081a81fb836e762fd9793c1c6b04ae5d714af32677e616","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/subgroup.rs":"cb68fb9581064ec9ef79e56a0c94c802b2f04cc5e2173a953ae9a7b2776042d5","src/bac
k/spv/writer.rs":"e7fdc6a0424a83bb06e2c01919c72bb16a62b46039c92258be251eceaa851601","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"b21ec11688e924708f4101417324f4f1547d63bff72f2de0e8849d619684917f","src/block.rs":"e447f7e041fd67052b23d1139cf0574eea93220a818af71691d960bdf026d45f","src/compact/expressions.rs":"a04f5dbc936693280fddc328df623af0b8d1fc0d11bd01987df9104e9c27bea1","src/compact/functions.rs":"27c084ca6b475999a3367f8ea429dbf7326a54c83ef86898b7ba67366f9bb828","src/compact/handle_set_map.rs":"748de9563f0ff897c113ba665f3e59ab3a023050e2206eb7d566a31f61fc32e5","src/compact/mod.rs":"7ec6bea36d11438c391c37d9a193b323d7fb365de6b9dfd45e78a3240dbab7ba","src/compact/statements.rs":"360e2bb6c2b7fdf4f9de368048ae574b2efd171adf34be44f84fbe21a15f5def","src/compact/types.rs":"9e73ac7dfdaf4b10eda1c938b881857392abc1315adcc035fc3980d63236cf37","src/error.rs":"70f7adbb59ea38ee7ebc44e8ad020d6db4f2dd4976913fb194e779d115241861","src/front/atomic_upgrade.rs":"af1386a3ef42e853042d2d08bf7d0e08fb8a4578ddbf44b5f57326643b9ba613","src/front/glsl/ast.rs":"663e815ea3e511f0445ef910340c380ff1bcf63805329ab7ca8cf35de7f902ed","src/front/glsl/builtins.rs":"8665ccc33938ae2d43257f25dd3cb75b604d30ccaf98888889b130ca7651695f","src/front/glsl/context.rs":"00c489ca8cb0e200595122ab1c857e2354625a680ff44ac3d7b33eadc4b991cd","src/front/glsl/error.rs":"2ef5061decd42dfc3054fd0f1a86dc7168e2064572571147117d798e39675722","src/front/glsl/functions.rs":"52802412788f95e1a55b92a72980e04792c876fbedff6e6d71f8fe0d23cbae80","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"bf97bf1710d5d1f8facb77913cb82868697920914a96ed309adf0f19154a2ab4","src/front/glsl/offset.rs":"9358602ca4f9ef21d5066d674dae757bf88fdf5c289c4360534354d13bd41dc0","src/front/glsl/parser.rs":"a752f2abac17e6d35c0eb406f2f1c24d2b622a55b1bececbdd13614edb686622","src/front/glsl/parser/declarations.rs":"796514c1d571b324817bf1236d4
c1227a91a512c0660ce5bb1264cd900027158","src/front/glsl/parser/expressions.rs":"7fd1ccb9261eaf1f9966798c2fed62c54c2c1d9d140d3e9d41d81775acbe35de","src/front/glsl/parser/functions.rs":"dd5980bd731bcd8044bbfa16f760cdc77c02422ec116d7ebbbe64fcbe1fb7a04","src/front/glsl/parser/types.rs":"08d708d0bae448bbfc4c79e6128f0b0bc489ae079018e69bf9589b77803e0372","src/front/glsl/parser_tests.rs":"c44ed3252096c83a0ce1ea9be8d2b867784cdc1c11aa4501aee1a85d86c62511","src/front/glsl/token.rs":"c25c489b152ee2d445ace3c2046473abe64d558b8d27fa08709110e58718b6ac","src/front/glsl/types.rs":"91c3a4e4d11b397ea647ff287e17a48baedf2f2c310e81b04618675daeb83f69","src/front/glsl/variables.rs":"6cb3db649a99452bc91f2bbebe425c0447920f7e9a87c38fa84d54c84f48861e","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"ba984fc9b933a844fa8dadf05dea70f308911054ad3f10448d652706e158b4bd","src/front/spv/convert.rs":"b4b19b7cf2360d17405ea4ec97d9bf02376150047dfef008c2a358690625802c","src/front/spv/error.rs":"f41593a3ba8d3bb4371fd26e813affc8d8ce1d6581c8af410cf5e2084a308049","src/front/spv/function.rs":"e584dbeaa5f7e1ff107f3e7e5211b0bfb8f7b9878a74dbe29dc7941cb33b2e0f","src/front/spv/image.rs":"a5ac477c01894a9c9ce5ec9d6751f991622d610f5581a759fc2e72034677d9e4","src/front/spv/mod.rs":"1f32a2a8174546075e57bc52bd0bba6e3fbae9a290e75a05332f244554bcf993","src/front/spv/null.rs":"a8ff44e97ffe42a4773d89b88fdb3a8ef8fba58bf6645c73b7a66470234ccf10","src/front/type_gen.rs":"49d8aae89143a0cba182c5271a2aa60972251c1603144bd21567565ff64b2274","src/front/wgsl/error.rs":"eb0d142e9d25ded215131b8f8ee79da72bf9881c1b5af711fccfc42381641d08","src/front/wgsl/index.rs":"c5928ac405ffbd74de60b96360f7e07a3470b39f8f76c6e0b06544b4b5f61f57","src/front/wgsl/lower/construction.rs":"c31aabb26bf7b8b62d7dee2e6b6fd26a0314247dd5665efe2190921ef97e3213","src/front/wgsl/lower/conversion.rs":"492293f4f3fc7786a3a480ca0f43a10ae5ac8557cf1903cdd616ca624093b951","src/front/wgsl/lower/mod.rs":"8c16d6bb6db75fe
d70c6ce1d397c696678e04e8b21aa6453c8700ab229294b0a","src/front/wgsl/mod.rs":"cebe3f61843cca32d6764b93b07cb369ae22d1240d81eabe25f5c42dc603fca1","src/front/wgsl/parse/ast.rs":"766ff3412f79652a71964a521755fb758f2192f157c62b553d4c53f9f0ba07d7","src/front/wgsl/parse/conv.rs":"860a02ccceb10468a89e28562547092c5dacce24411621e16e7e08ec45e27e09","src/front/wgsl/parse/lexer.rs":"17db87d0017f8f9a80fa151b8545f04e1b40c4e5feef6197f4a117efa03488bf","src/front/wgsl/parse/mod.rs":"61419806f2366812f8c14fed0d87a759eeb899d7e66bfb0b188156053bfbde27","src/front/wgsl/parse/number.rs":"dafd3d8651cfa1389cb359d76d39bd689e54f8d5025aa23e06c6edd871369efd","src/front/wgsl/tests.rs":"7a0a083a5b66af8e7d4b1a02401b27f077eb72d07181b610693f35b11f107c6c","src/front/wgsl/to_wgsl.rs":"20f9e503b26826020f024b9c99ca06e38438ad44f8dbb8c7cbf8ee7828780e61","src/keywords/mod.rs":"47a6fde012bf7d1e70f0fac7762f6a8e7dca6b9bbb99e2cada773c61527cfbfe","src/keywords/wgsl.rs":"c648ac44241ad55c8c8bad3d8f1bab973d11ddb9c380dcca369b735ed3975309","src/lib.rs":"37defb3e1b4cbbb29f18fc7e21b2169e423dec7f8025752d8079316f434d2d6b","src/non_max_u32.rs":"3f4aafc8e2775982187507d49e7aa58befffdaba71c9287be42afdfde28bcb66","src/proc/constant_evaluator.rs":"1cff379df1e1383e97bdaa54e79121bab9b3cc0cfa5b13a13abb5b3d6f3978b0","src/proc/emitter.rs":"39ac886c651e2ad33c06a676a7e4826a0e93de0af660c01e8e4b1f7406742f88","src/proc/index.rs":"7c10d3d89ad8c50d063f39ed5b24492e41b6f2d78a7b92138b1ee214a1ea06a2","src/proc/layouter.rs":"e95fd9defa08e0c610d22d7cd795a6af787610d1ff0d4ed27798b4ffd2098cf1","src/proc/mod.rs":"b0b856056eac4f57f4495ec6ec39e79e50386177b2ee0b042ee4d0dd9f9540da","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35","src/proc/terminator.rs":"fef2160473fcddd670c6b5806a8ea0ecbdcc0fdf6ed793dce131ecd08cce3944","src/proc/typifier.rs":"0ee6be729fe141399a279a6bc8a6f1f5cb82ee397f4352902ad3b662e2e9a618","src/span.rs":"fd3b338256f9301fc5289c37af9ccec704d2e5db92923b0dec45da371e84ce6a","src/valid/analyzer.rs":"64f0c4
9988a14e4bf4c9718be69e43d152994756095bb438a3467641c4f4cc1c","src/valid/compose.rs":"83e4c09c39f853cf085b83b87e48b3db571da619132960d3ec954ebdfb0a74f2","src/valid/expression.rs":"50f2fa21d80fd121bc4372dce616e1eeb7507cef29d93273c8c3f86df919b510","src/valid/function.rs":"a624138dc5c4a8d49f68e48fb66e64d14c2da1cd6e8dadfc74bab7a9f63a2f9f","src/valid/handles.rs":"fd9e52370d76b9ff57357c651a951112246ca6c4794b59c021601e9c6b11a0bd","src/valid/interface.rs":"a6d60fcca8c2eb840ddd2cd9148df8b9613d6624d16ab1e3bc1b83a81aef3e03","src/valid/mod.rs":"4130656e5d14c54de38d4f7e7e8cb15ffe60c19ddf93aa976b2fc3e990ddbf45","src/valid/type.rs":"52528d261599f5eb8c61197200b1ffba5dcea6357838edc09f2c51f406af527c"},"package":null} \ No newline at end of file +{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"fcace5fd54cbe1d42881e5d0148725a456c607d88534ee35bf07279a86d49c04","Cargo.toml":"2d3875e32bc7f5ed91ff206c6cb2f37e60cd0fefa3ae313af82af34db732c8a4","README.md":"00a6070ebadb15a9488f117b286f50e4838a7dd0a28d41c9fcff4781c6b49171","build.rs":"824e90067aa04ad8726a49476b1af51f5c5c16f9a0b372f00398b3748d723662","src/arena/handle.rs":"1bf2edcca03d7cb466e1f8bd4bf1c966b805505ea2ee3641c39aa04811413478","src/arena/handle_set.rs":"f84ca7edafc907e282abea094c0c9b1da1cf4500e0c6389bb3ce2ef655f4c4ea","src/arena/handlevec.rs":"dfc9249478eb6980c13587c740d91234214bc7e0eef90e6f40b3d99decf70a58","src/arena/mod.rs":"14db142163133eb9ca582bc3665450f49cc4b1cfbda043b8b1518590cdda73be","src/arena/range.rs":"88683c36b137499ab537cf906505c2bd0871e0b9177e3a4e55da9db4e86df6a2","src/arena/unique_arena.rs":"22761770dbcbb6ac497ebdcf07173b3f07fc3c74bc0a70990d5fad882f4f471f","src/back/continue_forward.rs":"f95b810164db51fde368362624ce2569826b47dc3460df316b879abba48e5388","src/back/dot/mod.rs":"b9f57441a9491e4a53b6d9dcb253484f9e935419d179513c2d24db08be591fae","src/back/glsl/features.rs":"df0edf6cec20778846c8e2a914e18854a4efb9ae6b755fa281a975aac0fc12c6","src/back/glsl/keywords
.rs":"b8883e5ee8a3d400fa44fef2baffe2853e72ff91e832499c128454d325ceccd9","src/back/glsl/mod.rs":"26e9859fc729929ae6cbf3224a2156a7f336d72d70a80db0d6a9bb940795a687","src/back/hlsl/conv.rs":"ce2f2ce5d2a3df43a5d9b2961f29cc011dc3e737c841f45b465e1e9812437620","src/back/hlsl/help.rs":"de631bd11ee65fbbeeae80733103789abfa6ff54b7b39b11e7c22cd02b486a1c","src/back/hlsl/keywords.rs":"a7164690a4da866e6bfb18ced20e32cc8c42dd7387e0e84addf0c2674f529cf5","src/back/hlsl/mod.rs":"8a88e0925ad62296349851f3f2f60959a63ae466422d1932bde53dd94eab586a","src/back/hlsl/storage.rs":"2c2a0071cafe487a398e396dddc85bdb319b1a5d74c097d529078e247a904359","src/back/hlsl/writer.rs":"0e452c9b40ff38536bb2c8265d5347d52abcda4b6b34f51319007f0e81ebf429","src/back/mod.rs":"59a3fd1215829497dc6d576fd01618c730987517c36244338ff7bb2a60eb663b","src/back/msl/keywords.rs":"e6a4ef77363f995de1f8079c0b8591497cbf9520c5d3b2d41c7e1f483e8abd24","src/back/msl/mod.rs":"6bfb164b854102dfbdf5e43528ff8050ff092eb805a7c3ccb8a2eea40a7509e5","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"ad13b7f42046bdeef091ea793e8dc1d992153140abddb5f78a9629fca362ba97","src/back/pipeline_constants.rs":"0d3353f30d86c89a54504b984997e841d9efe849d7fec7489df8357c9d88ff45","src/back/spv/block.rs":"056e2a11d27c41a1f042f3687b237f01d4a84df4756d62e5b7cc3cf4840f0db5","src/back/spv/helpers.rs":"bd666bf519a5d5561c2fab6ff78229739853ae6877d132911e71b57cfd656e42","src/back/spv/image.rs":"32b8f1ccdd54d80519ccf585451dcc6721d2a6a109d295a704e6c61857832556","src/back/spv/index.rs":"14f704d2d9409bbc9345d601725355048aa74471aaeb6aa781fc45a99ceaec38","src/back/spv/instructions.rs":"253b1fcb2883302734255fbc2d5fcebe3309ccdbb4784f33cf33c7fa66317967","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"91fa840f044ae40cc955db2278d5abcb03f53185452ba02ac0a78aedd37c24f3","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e4
1","src/back/spv/recyclable.rs":"8061e39ea5357c8d55081a81fb836e762fd9793c1c6b04ae5d714af32677e616","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/subgroup.rs":"cb68fb9581064ec9ef79e56a0c94c802b2f04cc5e2173a953ae9a7b2776042d5","src/back/spv/writer.rs":"f5ec8a0697504c00eafa30862209a0892b7afcc3a993e3b8862a993f84b75787","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"f80c2ef520ce506a46bf222010b30d4648a2de71346b61f202fc6e2cc0edac89","src/block.rs":"e447f7e041fd67052b23d1139cf0574eea93220a818af71691d960bdf026d45f","src/compact/expressions.rs":"a04f5dbc936693280fddc328df623af0b8d1fc0d11bd01987df9104e9c27bea1","src/compact/functions.rs":"27c084ca6b475999a3367f8ea429dbf7326a54c83ef86898b7ba67366f9bb828","src/compact/handle_set_map.rs":"748de9563f0ff897c113ba665f3e59ab3a023050e2206eb7d566a31f61fc32e5","src/compact/mod.rs":"7ec6bea36d11438c391c37d9a193b323d7fb365de6b9dfd45e78a3240dbab7ba","src/compact/statements.rs":"360e2bb6c2b7fdf4f9de368048ae574b2efd171adf34be44f84fbe21a15f5def","src/compact/types.rs":"9e73ac7dfdaf4b10eda1c938b881857392abc1315adcc035fc3980d63236cf37","src/error.rs":"70f7adbb59ea38ee7ebc44e8ad020d6db4f2dd4976913fb194e779d115241861","src/front/atomic_upgrade.rs":"af1386a3ef42e853042d2d08bf7d0e08fb8a4578ddbf44b5f57326643b9ba613","src/front/glsl/ast.rs":"663e815ea3e511f0445ef910340c380ff1bcf63805329ab7ca8cf35de7f902ed","src/front/glsl/builtins.rs":"8665ccc33938ae2d43257f25dd3cb75b604d30ccaf98888889b130ca7651695f","src/front/glsl/context.rs":"00c489ca8cb0e200595122ab1c857e2354625a680ff44ac3d7b33eadc4b991cd","src/front/glsl/error.rs":"2ef5061decd42dfc3054fd0f1a86dc7168e2064572571147117d798e39675722","src/front/glsl/functions.rs":"52802412788f95e1a55b92a72980e04792c876fbedff6e6d71f8fe0d23cbae80","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"bf97bf1710d5d1f8facb77913c
b82868697920914a96ed309adf0f19154a2ab4","src/front/glsl/offset.rs":"9358602ca4f9ef21d5066d674dae757bf88fdf5c289c4360534354d13bd41dc0","src/front/glsl/parser.rs":"a752f2abac17e6d35c0eb406f2f1c24d2b622a55b1bececbdd13614edb686622","src/front/glsl/parser/declarations.rs":"796514c1d571b324817bf1236d4c1227a91a512c0660ce5bb1264cd900027158","src/front/glsl/parser/expressions.rs":"7fd1ccb9261eaf1f9966798c2fed62c54c2c1d9d140d3e9d41d81775acbe35de","src/front/glsl/parser/functions.rs":"dd5980bd731bcd8044bbfa16f760cdc77c02422ec116d7ebbbe64fcbe1fb7a04","src/front/glsl/parser/types.rs":"0aad1c665058d3babe3c8ddc0ccf9be21c4cf18c807167fd125fb86a6b5f8873","src/front/glsl/parser_tests.rs":"c44ed3252096c83a0ce1ea9be8d2b867784cdc1c11aa4501aee1a85d86c62511","src/front/glsl/token.rs":"c25c489b152ee2d445ace3c2046473abe64d558b8d27fa08709110e58718b6ac","src/front/glsl/types.rs":"91c3a4e4d11b397ea647ff287e17a48baedf2f2c310e81b04618675daeb83f69","src/front/glsl/variables.rs":"6cb3db649a99452bc91f2bbebe425c0447920f7e9a87c38fa84d54c84f48861e","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"ba984fc9b933a844fa8dadf05dea70f308911054ad3f10448d652706e158b4bd","src/front/spv/convert.rs":"8e509c2c277bb93fb2e77309d8880e0026c0f1e2b861f5f9de1e7dd8caa0c013","src/front/spv/error.rs":"f41593a3ba8d3bb4371fd26e813affc8d8ce1d6581c8af410cf5e2084a308049","src/front/spv/function.rs":"e584dbeaa5f7e1ff107f3e7e5211b0bfb8f7b9878a74dbe29dc7941cb33b2e0f","src/front/spv/image.rs":"a5ac477c01894a9c9ce5ec9d6751f991622d610f5581a759fc2e72034677d9e4","src/front/spv/mod.rs":"1f32a2a8174546075e57bc52bd0bba6e3fbae9a290e75a05332f244554bcf993","src/front/spv/null.rs":"a8ff44e97ffe42a4773d89b88fdb3a8ef8fba58bf6645c73b7a66470234ccf10","src/front/type_gen.rs":"49d8aae89143a0cba182c5271a2aa60972251c1603144bd21567565ff64b2274","src/front/wgsl/error.rs":"ebe67771da52bce13238fd9f1839243b22e27fbff1064b005638f4d1ebdcf3fe","src/front/wgsl/index.rs":"caf16ac352824d580a5073253e
c13a284cb66d6dbe7b9ac08262fc722e31292a","src/front/wgsl/lower/construction.rs":"68ccb994d4e23e6067a47d355609798278fef65e17cc9256264b3ed007c81b03","src/front/wgsl/lower/conversion.rs":"492293f4f3fc7786a3a480ca0f43a10ae5ac8557cf1903cdd616ca624093b951","src/front/wgsl/lower/mod.rs":"d9159a152ae0479a5c78c60d9fa120348709062bcc535a3eb7c8534cf976096d","src/front/wgsl/mod.rs":"cebe3f61843cca32d6764b93b07cb369ae22d1240d81eabe25f5c42dc603fca1","src/front/wgsl/parse/ast.rs":"dc32291b31f1e60dfbb65ead29fcc5c7229ef0ede0eb35c5433788ff8a2d368a","src/front/wgsl/parse/conv.rs":"190dd3c1c0f14f0cd26fca6efc328c893c96eb0300fb332f1aedbfb40b8a8000","src/front/wgsl/parse/lexer.rs":"17db87d0017f8f9a80fa151b8545f04e1b40c4e5feef6197f4a117efa03488bf","src/front/wgsl/parse/mod.rs":"abd5135fd2f2d7f512c3ca2983f3ec4c815a04dd99f3966a068ef2bf145cfc8f","src/front/wgsl/parse/number.rs":"dafd3d8651cfa1389cb359d76d39bd689e54f8d5025aa23e06c6edd871369efd","src/front/wgsl/tests.rs":"7a0a083a5b66af8e7d4b1a02401b27f077eb72d07181b610693f35b11f107c6c","src/front/wgsl/to_wgsl.rs":"84be4fdb54f7d7d6dfcaedd1b48b6e6ee958685d950ec2007567b909cdc79289","src/keywords/mod.rs":"47a6fde012bf7d1e70f0fac7762f6a8e7dca6b9bbb99e2cada773c61527cfbfe","src/keywords/wgsl.rs":"c648ac44241ad55c8c8bad3d8f1bab973d11ddb9c380dcca369b735ed3975309","src/lib.rs":"70d7a857bd3c254caa5c7b135461d26e6e620a2d948a6fe5cfe337d3ce74ae6e","src/non_max_u32.rs":"3f4aafc8e2775982187507d49e7aa58befffdaba71c9287be42afdfde28bcb66","src/proc/constant_evaluator.rs":"b9d755878765fd7dbe31573ed378b026ac7dfaf4ee5780974f1e44305646c769","src/proc/emitter.rs":"39ac886c651e2ad33c06a676a7e4826a0e93de0af660c01e8e4b1f7406742f88","src/proc/index.rs":"7c10d3d89ad8c50d063f39ed5b24492e41b6f2d78a7b92138b1ee214a1ea06a2","src/proc/layouter.rs":"e95fd9defa08e0c610d22d7cd795a6af787610d1ff0d4ed27798b4ffd2098cf1","src/proc/mod.rs":"a150a2a251e09b54ce7e6104bd88eaa17e8e06221301a142c7e3770af92f1e07","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35
","src/proc/terminator.rs":"fef2160473fcddd670c6b5806a8ea0ecbdcc0fdf6ed793dce131ecd08cce3944","src/proc/typifier.rs":"0ee6be729fe141399a279a6bc8a6f1f5cb82ee397f4352902ad3b662e2e9a618","src/span.rs":"fd3b338256f9301fc5289c37af9ccec704d2e5db92923b0dec45da371e84ce6a","src/valid/analyzer.rs":"64f0c49988a14e4bf4c9718be69e43d152994756095bb438a3467641c4f4cc1c","src/valid/compose.rs":"83e4c09c39f853cf085b83b87e48b3db571da619132960d3ec954ebdfb0a74f2","src/valid/expression.rs":"50f2fa21d80fd121bc4372dce616e1eeb7507cef29d93273c8c3f86df919b510","src/valid/function.rs":"a624138dc5c4a8d49f68e48fb66e64d14c2da1cd6e8dadfc74bab7a9f63a2f9f","src/valid/handles.rs":"fd9e52370d76b9ff57357c651a951112246ca6c4794b59c021601e9c6b11a0bd","src/valid/interface.rs":"29e5d07743689c3872c0254bbee585a0cc4bd95ea930e66a868de9164ece4709","src/valid/mod.rs":"4130656e5d14c54de38d4f7e7e8cb15ffe60c19ddf93aa976b2fc3e990ddbf45","src/valid/type.rs":"52528d261599f5eb8c61197200b1ffba5dcea6357838edc09f2c51f406af527c"},"package":null} \ No newline at end of file diff --git a/third_party/rust/naga/Cargo.toml b/third_party/rust/naga/Cargo.toml index 7c5d5295ed345..7bf955150b858 100644 --- a/third_party/rust/naga/Cargo.toml +++ b/third_party/rust/naga/Cargo.toml @@ -84,7 +84,7 @@ optional = true version = "1.1.0" [dependencies.serde] -version = "1.0.208" +version = "1.0.210" features = ["derive"] optional = true @@ -112,6 +112,9 @@ version = "0.11" [dev-dependencies.hlsl-snapshots] path = "./hlsl-snapshots" +[dev-dependencies.itertools] +version = "0.10.5" + [dev-dependencies.rspirv] version = "0.11" git = "https://github.com/gfx-rs/rspirv" diff --git a/third_party/rust/naga/src/back/glsl/features.rs b/third_party/rust/naga/src/back/glsl/features.rs index b22bcbe6514c7..362f4bb4f38d8 100644 --- a/third_party/rust/naga/src/back/glsl/features.rs +++ b/third_party/rust/naga/src/back/glsl/features.rs @@ -399,7 +399,7 @@ impl<'a, W> Writer<'a, W> { | StorageFormat::Rg16Float | StorageFormat::Rgb10a2Uint | 
StorageFormat::Rgb10a2Unorm - | StorageFormat::Rg11b10UFloat + | StorageFormat::Rg11b10Ufloat | StorageFormat::Rg32Uint | StorageFormat::Rg32Sint | StorageFormat::Rg32Float => { diff --git a/third_party/rust/naga/src/back/glsl/mod.rs b/third_party/rust/naga/src/back/glsl/mod.rs index 4c7f8b325167f..159d9cdcf2010 100644 --- a/third_party/rust/naga/src/back/glsl/mod.rs +++ b/third_party/rust/naga/src/back/glsl/mod.rs @@ -47,7 +47,7 @@ pub use features::Features; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, ShaderStage, TypeInner, }; use features::FeaturesManager; @@ -498,6 +498,9 @@ pub enum Error { Custom(String), #[error("overrides should not be present at this stage")] Override, + /// [`crate::Sampling::First`] is unsupported. + #[error("`{:?}` sampling is unsupported", crate::Sampling::First)] + FirstSamplingNotSupported, } /// Binary operation with a different logic on the GLSL side. @@ -1534,7 +1537,7 @@ impl<'a, W: Write> Writer<'a, W> { // here, regardless of the version. if let Some(sampling) = sampling { if emit_interpolation_and_auxiliary { - if let Some(qualifier) = glsl_sampling(sampling) { + if let Some(qualifier) = glsl_sampling(sampling)? { write!(self.out, "{qualifier} ")?; } } @@ -1584,6 +1587,7 @@ impl<'a, W: Write> Writer<'a, W> { info, expressions: &func.expressions, named_expressions: &func.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&func.expressions), }; self.named_expressions.clear(); @@ -4770,14 +4774,15 @@ const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str } /// Return the GLSL auxiliary qualifier for the given sampling value. 
-const fn glsl_sampling(sampling: crate::Sampling) -> Option<&'static str> { +const fn glsl_sampling(sampling: crate::Sampling) -> BackendResult> { use crate::Sampling as S; - match sampling { - S::Center => None, + Ok(match sampling { + S::First => return Err(Error::FirstSamplingNotSupported), + S::Center | S::Either => None, S::Centroid => Some("centroid"), S::Sample => Some("sample"), - } + }) } /// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension) @@ -4820,7 +4825,7 @@ fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Err Sf::Rgba8Sint => "rgba8i", Sf::Rgb10a2Uint => "rgb10_a2ui", Sf::Rgb10a2Unorm => "rgb10_a2", - Sf::Rg11b10UFloat => "r11f_g11f_b10f", + Sf::Rg11b10Ufloat => "r11f_g11f_b10f", Sf::Rg32Uint => "rg32ui", Sf::Rg32Sint => "rg32i", Sf::Rg32Float => "rg32f", diff --git a/third_party/rust/naga/src/back/hlsl/conv.rs b/third_party/rust/naga/src/back/hlsl/conv.rs index 9df73b279c85c..473fc9476d287 100644 --- a/third_party/rust/naga/src/back/hlsl/conv.rs +++ b/third_party/rust/naga/src/back/hlsl/conv.rs @@ -132,7 +132,7 @@ impl crate::StorageFormat { Self::Rg8Sint | Self::Rg16Sint | Self::Rg32Uint => "int2", Self::Rg8Uint | Self::Rg16Uint | Self::Rg32Sint => "uint2", - Self::Rg11b10UFloat => "float3", + Self::Rg11b10Ufloat => "float3", Self::Rgba16Float | Self::Rgba32Float => "float4", Self::Rgba8Unorm | Self::Bgra8Unorm | Self::Rgba16Unorm | Self::Rgb10a2Unorm => { @@ -202,7 +202,7 @@ impl crate::Sampling { /// Return the HLSL auxiliary qualifier for the given sampling value. 
pub(super) const fn to_hlsl_str(self) -> Option<&'static str> { match self { - Self::Center => None, + Self::Center | Self::First | Self::Either => None, Self::Centroid => Some("centroid"), Self::Sample => Some("sample"), } diff --git a/third_party/rust/naga/src/back/hlsl/writer.rs b/third_party/rust/naga/src/back/hlsl/writer.rs index 85d943e8500ae..e33fc79f20827 100644 --- a/third_party/rust/naga/src/back/hlsl/writer.rs +++ b/third_party/rust/naga/src/back/hlsl/writer.rs @@ -8,7 +8,7 @@ use super::{ }; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, Module, Scalar, ScalarKind, ShaderStage, TypeInner, }; use std::{fmt, mem}; @@ -346,6 +346,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { info, expressions: &function.expressions, named_expressions: &function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&function.expressions), }; let name = self.names[&NameKey::Function(handle)].clone(); @@ -386,6 +387,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> { info, expressions: &ep.function.expressions, named_expressions: &ep.function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&ep.function.expressions), }; self.write_wrapped_functions(module, &ctx)?; diff --git a/third_party/rust/naga/src/back/mod.rs b/third_party/rust/naga/src/back/mod.rs index 352adc37ecdc2..58c7fa02cb6e6 100644 --- a/third_party/rust/naga/src/back/mod.rs +++ b/third_party/rust/naga/src/back/mod.rs @@ -3,6 +3,8 @@ Backend functions that export shader [`Module`](super::Module)s into binary and */ #![allow(dead_code)] // can be dead if none of the enabled backends need it +use crate::proc::ExpressionKindTracker; + #[cfg(dot_out)] pub mod dot; #[cfg(glsl_out)] @@ -118,6 +120,8 @@ pub struct FunctionCtx<'a> { pub expressions: &'a crate::Arena, /// Map of expressions that have associated variable names pub named_expressions: &'a crate::NamedExpressions, + /// For constness 
checks + pub expr_kind_tracker: ExpressionKindTracker, } impl FunctionCtx<'_> { diff --git a/third_party/rust/naga/src/back/msl/mod.rs b/third_party/rust/naga/src/back/msl/mod.rs index 626475debcd38..96dd142a50a71 100644 --- a/third_party/rust/naga/src/back/msl/mod.rs +++ b/third_party/rust/naga/src/back/msl/mod.rs @@ -627,6 +627,7 @@ impl ResolvedInterpolation { (I::Linear, S::Centroid) => Self::CentroidNoPerspective, (I::Linear, S::Sample) => Self::SampleNoPerspective, (I::Flat, _) => Self::Flat, + _ => unreachable!(), } } diff --git a/third_party/rust/naga/src/back/pipeline_constants.rs b/third_party/rust/naga/src/back/pipeline_constants.rs index 77ee530be96e5..5f82862f72ae9 100644 --- a/third_party/rust/naga/src/back/pipeline_constants.rs +++ b/third_party/rust/naga/src/back/pipeline_constants.rs @@ -289,6 +289,7 @@ fn process_function( &mut local_expression_kind_tracker, &mut emitter, &mut block, + false, ); for (old_h, mut expr, span) in expressions.drain() { diff --git a/third_party/rust/naga/src/back/spv/instructions.rs b/third_party/rust/naga/src/back/spv/instructions.rs index 9029c973debf6..9bd58508a1643 100644 --- a/third_party/rust/naga/src/back/spv/instructions.rs +++ b/third_party/rust/naga/src/back/spv/instructions.rs @@ -1170,7 +1170,7 @@ impl From for spirv::ImageFormat { Sf::Bgra8Unorm => Self::Unknown, Sf::Rgb10a2Uint => Self::Rgb10a2ui, Sf::Rgb10a2Unorm => Self::Rgb10A2, - Sf::Rg11b10UFloat => Self::R11fG11fB10f, + Sf::Rg11b10Ufloat => Self::R11fG11fB10f, Sf::Rg32Uint => Self::Rg32ui, Sf::Rg32Sint => Self::Rg32i, Sf::Rg32Float => Self::Rg32f, diff --git a/third_party/rust/naga/src/back/spv/writer.rs b/third_party/rust/naga/src/back/spv/writer.rs index d1c1e82a2022b..488371558975b 100644 --- a/third_party/rust/naga/src/back/spv/writer.rs +++ b/third_party/rust/naga/src/back/spv/writer.rs @@ -1511,7 +1511,12 @@ impl Writer { } match sampling { // Center sampling is the default in SPIR-V. 
- None | Some(crate::Sampling::Center) => (), + None + | Some( + crate::Sampling::Center + | crate::Sampling::First + | crate::Sampling::Either, + ) => (), Some(crate::Sampling::Centroid) => { self.decorate(id, Decoration::Centroid, &[]); } diff --git a/third_party/rust/naga/src/back/wgsl/writer.rs b/third_party/rust/naga/src/back/wgsl/writer.rs index e5a5e5f647998..0f2635eb0d926 100644 --- a/third_party/rust/naga/src/back/wgsl/writer.rs +++ b/third_party/rust/naga/src/back/wgsl/writer.rs @@ -1,7 +1,7 @@ use super::Error; use crate::{ back::{self, Baked}, - proc::{self, NameKey}, + proc::{self, ExpressionKindTracker, NameKey}, valid, Handle, Module, ShaderStage, TypeInner, }; use std::fmt::Write; @@ -166,6 +166,7 @@ impl Writer { info: fun_info, expressions: &function.expressions, named_expressions: &function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&function.expressions), }; // Write the function @@ -193,6 +194,7 @@ impl Writer { info: info.get_entry_point(index), expressions: &ep.function.expressions, named_expressions: &ep.function.named_expressions, + expr_kind_tracker: ExpressionKindTracker::from_arena(&ep.function.expressions), }; self.write_function(module, &ep.function, &func_ctx)?; @@ -1115,8 +1117,14 @@ impl Writer { func_ctx: &back::FunctionCtx, name: &str, ) -> BackendResult { + // Some functions are marked as const, but are not yet implemented as constant expression + let quantifier = if func_ctx.expr_kind_tracker.is_impl_const(handle) { + "const" + } else { + "let" + }; // Write variable name - write!(self.out, "let {name}")?; + write!(self.out, "{quantifier} {name}")?; if self.flags.contains(WriterFlags::EXPLICIT_TYPES) { write!(self.out, ": ")?; let ty = &func_ctx.info[handle].ty; @@ -2015,7 +2023,7 @@ const fn storage_format_str(format: crate::StorageFormat) -> &'static str { Sf::Bgra8Unorm => "bgra8unorm", Sf::Rgb10a2Uint => "rgb10a2uint", Sf::Rgb10a2Unorm => "rgb10a2unorm", - Sf::Rg11b10UFloat => "rg11b10float", + 
Sf::Rg11b10Ufloat => "rg11b10float", Sf::Rg32Uint => "rg32uint", Sf::Rg32Sint => "rg32sint", Sf::Rg32Float => "rg32float", @@ -2053,6 +2061,8 @@ const fn sampling_str(sampling: crate::Sampling) -> &'static str { S::Center => "", S::Centroid => "centroid", S::Sample => "sample", + S::First => "first", + S::Either => "either", } } diff --git a/third_party/rust/naga/src/front/glsl/parser/types.rs b/third_party/rust/naga/src/front/glsl/parser/types.rs index d22387f375b1a..829b9fd8975d9 100644 --- a/third_party/rust/naga/src/front/glsl/parser/types.rs +++ b/third_party/rust/naga/src/front/glsl/parser/types.rs @@ -397,7 +397,7 @@ fn map_image_format(word: &str) -> Option { "rgba16f" => Sf::Rgba16Float, "rg32f" => Sf::Rg32Float, "rg16f" => Sf::Rg16Float, - "r11f_g11f_b10f" => Sf::Rg11b10UFloat, + "r11f_g11f_b10f" => Sf::Rg11b10Ufloat, "r32f" => Sf::R32Float, "r16f" => Sf::R16Float, "rgba16" => Sf::Rgba16Unorm, diff --git a/third_party/rust/naga/src/front/spv/convert.rs b/third_party/rust/naga/src/front/spv/convert.rs index 88d171b5b73e0..f131db616e8b6 100644 --- a/third_party/rust/naga/src/front/spv/convert.rs +++ b/third_party/rust/naga/src/front/spv/convert.rs @@ -104,7 +104,7 @@ pub(super) fn map_image_format(word: spirv::Word) -> Result Ok(crate::StorageFormat::Rgba8Sint), Some(spirv::ImageFormat::Rgb10a2ui) => Ok(crate::StorageFormat::Rgb10a2Uint), Some(spirv::ImageFormat::Rgb10A2) => Ok(crate::StorageFormat::Rgb10a2Unorm), - Some(spirv::ImageFormat::R11fG11fB10f) => Ok(crate::StorageFormat::Rg11b10UFloat), + Some(spirv::ImageFormat::R11fG11fB10f) => Ok(crate::StorageFormat::Rg11b10Ufloat), Some(spirv::ImageFormat::Rg32ui) => Ok(crate::StorageFormat::Rg32Uint), Some(spirv::ImageFormat::Rg32i) => Ok(crate::StorageFormat::Rg32Sint), Some(spirv::ImageFormat::Rg32f) => Ok(crate::StorageFormat::Rg32Float), diff --git a/third_party/rust/naga/src/front/wgsl/error.rs b/third_party/rust/naga/src/front/wgsl/error.rs index ed7006d501710..3d4ac621831b4 100644 --- 
a/third_party/rust/naga/src/front/wgsl/error.rs +++ b/third_party/rust/naga/src/front/wgsl/error.rs @@ -263,6 +263,8 @@ pub(crate) enum Error<'a> { limit: u8, }, PipelineConstantIDValue(Span), + NotBool(Span), + ConstAssertFailed(Span), } #[derive(Clone, Debug)] @@ -815,6 +817,22 @@ impl<'a> Error<'a> { )], notes: vec![], }, + Error::NotBool(span) => ParseError { + message: "must be a const-expression that resolves to a bool".to_string(), + labels: vec![( + span, + "must resolve to bool".into(), + )], + notes: vec![], + }, + Error::ConstAssertFailed(span) => ParseError { + message: "const_assert failure".to_string(), + labels: vec![( + span, + "evaluates to false".into(), + )], + notes: vec![], + }, } } } diff --git a/third_party/rust/naga/src/front/wgsl/index.rs b/third_party/rust/naga/src/front/wgsl/index.rs index 593405508fdac..bc0af670ff00b 100644 --- a/third_party/rust/naga/src/front/wgsl/index.rs +++ b/third_party/rust/naga/src/front/wgsl/index.rs @@ -20,13 +20,16 @@ impl<'a> Index<'a> { // While doing so, reject conflicting definitions. let mut globals = FastHashMap::with_capacity_and_hasher(tu.decls.len(), Default::default()); for (handle, decl) in tu.decls.iter() { - let ident = decl_ident(decl); - let name = ident.name; - if let Some(old) = globals.insert(name, handle) { - return Err(Error::Redefinition { - previous: decl_ident(&tu.decls[old]).span, - current: ident.span, - }); + if let Some(ident) = decl_ident(decl) { + let name = ident.name; + if let Some(old) = globals.insert(name, handle) { + return Err(Error::Redefinition { + previous: decl_ident(&tu.decls[old]) + .expect("decl should have ident for redefinition") + .span, + current: ident.span, + }); + } } } @@ -130,7 +133,7 @@ impl<'a> DependencySolver<'a, '_> { return if dep_id == id { // A declaration refers to itself directly. 
Err(Error::RecursiveDeclaration { - ident: decl_ident(decl).span, + ident: decl_ident(decl).expect("decl should have ident").span, usage: dep.usage, }) } else { @@ -146,14 +149,19 @@ impl<'a> DependencySolver<'a, '_> { .unwrap_or(0); Err(Error::CyclicDeclaration { - ident: decl_ident(&self.module.decls[dep_id]).span, + ident: decl_ident(&self.module.decls[dep_id]) + .expect("decl should have ident") + .span, path: self.path[start_at..] .iter() .map(|curr_dep| { let curr_id = curr_dep.decl; let curr_decl = &self.module.decls[curr_id]; - (decl_ident(curr_decl).span, curr_dep.usage) + ( + decl_ident(curr_decl).expect("decl should have ident").span, + curr_dep.usage, + ) }) .collect(), }) @@ -182,13 +190,14 @@ impl<'a> DependencySolver<'a, '_> { } } -const fn decl_ident<'a>(decl: &ast::GlobalDecl<'a>) -> ast::Ident<'a> { +const fn decl_ident<'a>(decl: &ast::GlobalDecl<'a>) -> Option> { match decl.kind { - ast::GlobalDeclKind::Fn(ref f) => f.name, - ast::GlobalDeclKind::Var(ref v) => v.name, - ast::GlobalDeclKind::Const(ref c) => c.name, - ast::GlobalDeclKind::Override(ref o) => o.name, - ast::GlobalDeclKind::Struct(ref s) => s.name, - ast::GlobalDeclKind::Type(ref t) => t.name, + ast::GlobalDeclKind::Fn(ref f) => Some(f.name), + ast::GlobalDeclKind::Var(ref v) => Some(v.name), + ast::GlobalDeclKind::Const(ref c) => Some(c.name), + ast::GlobalDeclKind::Override(ref o) => Some(o.name), + ast::GlobalDeclKind::Struct(ref s) => Some(s.name), + ast::GlobalDeclKind::Type(ref t) => Some(t.name), + ast::GlobalDeclKind::ConstAssert(_) => None, } } diff --git a/third_party/rust/naga/src/front/wgsl/lower/construction.rs b/third_party/rust/naga/src/front/wgsl/lower/construction.rs index cb6ceb0613c9e..dff2e9d6e400f 100644 --- a/third_party/rust/naga/src/front/wgsl/lower/construction.rs +++ b/third_party/rust/naga/src/front/wgsl/lower/construction.rs @@ -578,8 +578,13 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { Constructor::Type(ty) } ast::ConstructorType::PartialVector { size 
} => Constructor::PartialVector { size }, - ast::ConstructorType::Vector { size, scalar } => { - let ty = ctx.ensure_type_exists(scalar.to_inner_vector(size)); + ast::ConstructorType::Vector { size, ty, ty_span } => { + let ty = self.resolve_ast_type(ty, &mut ctx.as_global())?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + let ty = ctx.ensure_type_exists(crate::TypeInner::Vector { size, scalar }); Constructor::Type(ty) } ast::ConstructorType::PartialMatrix { columns, rows } => { @@ -588,13 +593,22 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ast::ConstructorType::Matrix { rows, columns, - width, + ty, + ty_span, } => { - let ty = ctx.ensure_type_exists(crate::TypeInner::Matrix { - columns, - rows, - scalar: crate::Scalar::float(width), - }); + let ty = self.resolve_ast_type(ty, &mut ctx.as_global())?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + let ty = match scalar.kind { + crate::ScalarKind::Float => ctx.ensure_type_exists(crate::TypeInner::Matrix { + columns, + rows, + scalar, + }), + _ => return Err(Error::BadMatrixScalarKind(ty_span, scalar)), + }; Constructor::Type(ty) } ast::ConstructorType::PartialArray => Constructor::PartialArray, diff --git a/third_party/rust/naga/src/front/wgsl/lower/mod.rs b/third_party/rust/naga/src/front/wgsl/lower/mod.rs index ceed9c3990dc2..7586f98dc3cfc 100644 --- a/third_party/rust/naga/src/front/wgsl/lower/mod.rs +++ b/third_party/rust/naga/src/front/wgsl/lower/mod.rs @@ -98,7 +98,7 @@ impl<'source> GlobalContext<'source, '_, '_> { types: self.types, module: self.module, const_typifier: self.const_typifier, - expr_type: ExpressionContextType::Constant, + expr_type: ExpressionContextType::Constant(None), global_expression_kind_tracker: self.global_expression_kind_tracker, } } @@ -160,7 +160,8 @@ pub struct 
StatementContext<'source, 'temp, 'out> { /// /// [`LocalVariable`]: crate::Expression::LocalVariable /// [`FunctionArgument`]: crate::Expression::FunctionArgument - local_table: &'temp mut FastHashMap, Typed>>, + local_table: + &'temp mut FastHashMap, Declared>>>, const_typifier: &'temp mut Typifier, typifier: &'temp mut Typifier, @@ -184,6 +185,32 @@ pub struct StatementContext<'source, 'temp, 'out> { } impl<'a, 'temp> StatementContext<'a, 'temp, '_> { + fn as_const<'t>( + &'t mut self, + block: &'t mut crate::Block, + emitter: &'t mut Emitter, + ) -> ExpressionContext<'a, 't, '_> + where + 'temp: 't, + { + ExpressionContext { + globals: self.globals, + types: self.types, + ast_expressions: self.ast_expressions, + const_typifier: self.const_typifier, + global_expression_kind_tracker: self.global_expression_kind_tracker, + module: self.module, + expr_type: ExpressionContextType::Constant(Some(LocalExpressionContext { + local_table: self.local_table, + function: self.function, + block, + emitter, + typifier: self.typifier, + local_expression_kind_tracker: self.local_expression_kind_tracker, + })), + } + } + fn as_expression<'t>( &'t mut self, block: &'t mut crate::Block, @@ -199,7 +226,7 @@ impl<'a, 'temp> StatementContext<'a, 'temp, '_> { const_typifier: self.const_typifier, global_expression_kind_tracker: self.global_expression_kind_tracker, module: self.module, - expr_type: ExpressionContextType::Runtime(RuntimeExpressionContext { + expr_type: ExpressionContextType::Runtime(LocalExpressionContext { local_table: self.local_table, function: self.function, block, @@ -235,12 +262,12 @@ impl<'a, 'temp> StatementContext<'a, 'temp, '_> { } } -pub struct RuntimeExpressionContext<'temp, 'out> { +pub struct LocalExpressionContext<'temp, 'out> { /// A map from [`ast::Local`] handles to the Naga expressions we've built for them. /// /// This is always [`StatementContext::local_table`] for the /// enclosing statement; see that documentation for details. 
- local_table: &'temp FastHashMap, Typed>>, + local_table: &'temp FastHashMap, Declared>>>, function: &'out mut crate::Function, block: &'temp mut crate::Block, @@ -259,18 +286,18 @@ pub enum ExpressionContextType<'temp, 'out> { /// We are lowering to an arbitrary runtime expression, to be /// included in a function's body. /// - /// The given [`RuntimeExpressionContext`] holds information about local + /// The given [`LocalExpressionContext`] holds information about local /// variables, arguments, and other definitions available only to runtime /// expressions, not constant or override expressions. - Runtime(RuntimeExpressionContext<'temp, 'out>), + Runtime(LocalExpressionContext<'temp, 'out>), /// We are lowering to a constant expression, to be included in the module's /// constant expression arena. /// - /// Everything constant expressions are allowed to refer to is - /// available in the [`ExpressionContext`], so this variant - /// carries no further information. - Constant, + /// Everything global constant expressions are allowed to refer to is + /// available in the [`ExpressionContext`], but local constant expressions can + /// also refer to other + Constant(Option>), /// We are lowering to an override expression, to be included in the module's /// constant expression arena. 
@@ -352,7 +379,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { ast_expressions: self.ast_expressions, const_typifier: self.const_typifier, module: self.module, - expr_type: ExpressionContextType::Constant, + expr_type: ExpressionContextType::Constant(None), global_expression_kind_tracker: self.global_expression_kind_tracker, } } @@ -376,8 +403,19 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { rctx.local_expression_kind_tracker, rctx.emitter, rctx.block, + false, ), - ExpressionContextType::Constant => ConstantEvaluator::for_wgsl_module( + ExpressionContextType::Constant(Some(ref mut rctx)) => { + ConstantEvaluator::for_wgsl_function( + self.module, + &mut rctx.function.expressions, + rctx.local_expression_kind_tracker, + rctx.emitter, + rctx.block, + true, + ) + } + ExpressionContextType::Constant(None) => ConstantEvaluator::for_wgsl_module( self.module, self.global_expression_kind_tracker, false, @@ -412,15 +450,27 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { .eval_expr_to_u32_from(handle, &ctx.function.expressions) .ok() } - ExpressionContextType::Constant => self.module.to_ctx().eval_expr_to_u32(handle).ok(), + ExpressionContextType::Constant(Some(ref ctx)) => { + assert!(ctx.local_expression_kind_tracker.is_const(handle)); + self.module + .to_ctx() + .eval_expr_to_u32_from(handle, &ctx.function.expressions) + .ok() + } + ExpressionContextType::Constant(None) => { + self.module.to_ctx().eval_expr_to_u32(handle).ok() + } ExpressionContextType::Override => None, } } fn get_expression_span(&self, handle: Handle) -> Span { match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.function.expressions.get_span(handle), - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => { + ctx.function.expressions.get_span(handle) + } + ExpressionContextType::Constant(None) | 
ExpressionContextType::Override => { self.module.global_expressions.get_span(handle) } } @@ -428,20 +478,35 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { fn typifier(&self) -> &Typifier { match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.typifier, - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => ctx.typifier, + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { self.const_typifier } } } + fn local( + &mut self, + local: &Handle, + span: Span, + ) -> Result>, Error<'source>> { + match self.expr_type { + ExpressionContextType::Runtime(ref ctx) => Ok(ctx.local_table[local].runtime()), + ExpressionContextType::Constant(Some(ref ctx)) => ctx.local_table[local] + .const_time() + .ok_or(Error::UnexpectedOperationInConstContext(span)), + _ => Err(Error::UnexpectedOperationInConstContext(span)), + } + } + fn runtime_expression_ctx( &mut self, span: Span, - ) -> Result<&mut RuntimeExpressionContext<'temp, 'out>, Error<'source>> { + ) -> Result<&mut LocalExpressionContext<'temp, 'out>, Error<'source>> { match self.expr_type { ExpressionContextType::Runtime(ref mut ctx) => Ok(ctx), - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(_) | ExpressionContextType::Override => { Err(Error::UnexpectedOperationInConstContext(span)) } } @@ -480,7 +545,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { } // This means a `gather` operation appeared in a constant expression. // This error refers to the `gather` itself, not its "component" argument. 
- ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(_) | ExpressionContextType::Override => { Err(Error::UnexpectedOperationInConstContext(gather_span)) } } @@ -505,8 +570,9 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { // except that this lets the borrow checker see that it's okay // to also borrow self.module.types mutably below. let typifier = match self.expr_type { - ExpressionContextType::Runtime(ref ctx) => ctx.typifier, - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Runtime(ref ctx) + | ExpressionContextType::Constant(Some(ref ctx)) => ctx.typifier, + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { &*self.const_typifier } }; @@ -542,7 +608,8 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { let typifier; let expressions; match self.expr_type { - ExpressionContextType::Runtime(ref mut ctx) => { + ExpressionContextType::Runtime(ref mut ctx) + | ExpressionContextType::Constant(Some(ref mut ctx)) => { resolve_ctx = ResolveContext::with_locals( self.module, &ctx.function.local_variables, @@ -551,7 +618,7 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { typifier = &mut *ctx.typifier; expressions = &ctx.function.expressions; } - ExpressionContextType::Constant | ExpressionContextType::Override => { + ExpressionContextType::Constant(None) | ExpressionContextType::Override => { resolve_ctx = ResolveContext::with_locals(self.module, &empty_arena, &[]); typifier = self.const_typifier; expressions = &self.module.global_expressions; @@ -643,18 +710,20 @@ impl<'source, 'temp, 'out> ExpressionContext<'source, 'temp, 'out> { span: Span, ) -> Result, Error<'source>> { match self.expr_type { - ExpressionContextType::Runtime(ref mut rctx) => { + ExpressionContextType::Runtime(ref mut rctx) + | ExpressionContextType::Constant(Some(ref mut rctx)) => { rctx.block 
.extend(rctx.emitter.finish(&rctx.function.expressions)); } - ExpressionContextType::Constant | ExpressionContextType::Override => {} + ExpressionContextType::Constant(None) | ExpressionContextType::Override => {} } let result = self.append_expression(expression, span); match self.expr_type { - ExpressionContextType::Runtime(ref mut rctx) => { + ExpressionContextType::Runtime(ref mut rctx) + | ExpressionContextType::Constant(Some(ref mut rctx)) => { rctx.emitter.start(&rctx.function.expressions); } - ExpressionContextType::Constant | ExpressionContextType::Override => {} + ExpressionContextType::Constant(None) | ExpressionContextType::Override => {} } result } @@ -718,6 +787,30 @@ impl<'source> ArgumentContext<'_, 'source> { } } +#[derive(Debug, Copy, Clone)] +enum Declared { + /// Value declared as const + Const(T), + + /// Value declared as non-const + Runtime(T), +} + +impl Declared { + fn runtime(self) -> T { + match self { + Declared::Const(t) | Declared::Runtime(t) => t, + } + } + + fn const_time(self) -> Option { + match self { + Declared::Const(t) => Some(t), + Declared::Runtime(_) => None, + } + } +} + /// WGSL type annotations on expressions, types, values, etc. 
/// /// Naga and WGSL types are very close, but Naga lacks WGSL's `ref` types, which @@ -814,7 +907,13 @@ impl Components { *comp = Self::letter_component(ch).ok_or(Error::BadAccessor(name_span))?; } - Ok(Components::Swizzle { size, pattern }) + if name.chars().all(|c| matches!(c, 'x' | 'y' | 'z' | 'w')) + || name.chars().all(|c| matches!(c, 'r' | 'g' | 'b' | 'a')) + { + Ok(Components::Swizzle { size, pattern }) + } else { + Err(Error::BadAccessor(name_span)) + } } } @@ -935,16 +1034,21 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ctx.globals.insert(f.name.name, lowered_decl); } ast::GlobalDeclKind::Var(ref v) => { - let ty = self.resolve_ast_type(v.ty, &mut ctx)?; - - let init; - if let Some(init_ast) = v.init { - let mut ectx = ctx.as_override(); - let lowered = self.expression_for_abstract(init_ast, &mut ectx)?; - let ty_res = crate::proc::TypeResolution::Handle(ty); - let converted = ectx - .try_automatic_conversions(lowered, &ty_res, v.name.span) - .map_err(|error| match error { + let explicit_ty = + v.ty.map(|ast| self.resolve_ast_type(ast, &mut ctx)) + .transpose()?; + + let mut ectx = ctx.as_override(); + + let ty; + let initializer; + match (v.init, explicit_ty) { + (Some(init), Some(explicit_ty)) => { + let init = self.expression_for_abstract(init, &mut ectx)?; + let ty_res = crate::proc::TypeResolution::Handle(explicit_ty); + let init = ectx + .try_automatic_conversions(init, &ty_res, v.name.span) + .map_err(|error| match error { Error::AutoConversion(e) => Error::InitializationTypeMismatch { name: v.name.span, expected: e.dest_type, @@ -952,9 +1056,19 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { }, other => other, })?; - init = Some(converted); - } else { - init = None; + ty = explicit_ty; + initializer = Some(init); + } + (Some(init), None) => { + let concretized = self.expression(init, &mut ectx)?; + ty = ectx.register_type(concretized)?; + initializer = Some(concretized); + } + (None, Some(explicit_ty)) => { + ty = explicit_ty; + 
initializer = None; + } + (None, None) => return Err(Error::DeclMissingTypeAndInit(v.name.span)), } let binding = if let Some(ref binding) = v.binding { @@ -972,7 +1086,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { space: v.space, binding, ty, - init, + init: initializer, }, span, ); @@ -1090,6 +1204,20 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ctx.globals .insert(alias.name.name, LoweredGlobalDecl::Type(ty)); } + ast::GlobalDeclKind::ConstAssert(condition) => { + let condition = self.expression(condition, &mut ctx.as_const())?; + + let span = ctx.module.global_expressions.get_span(condition); + match ctx + .module + .to_ctx() + .eval_expr_to_bool_from(condition, &ctx.module.global_expressions) + { + Some(true) => Ok(()), + Some(false) => Err(Error::ConstAssertFailed(span)), + _ => Err(Error::NotBool(span)), + }?; + } } } @@ -1120,7 +1248,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { let ty = self.resolve_ast_type(arg.ty, ctx)?; let expr = expressions .append(crate::Expression::FunctionArgument(i as u32), arg.name.span); - local_table.insert(arg.handle, Typed::Plain(expr)); + local_table.insert(arg.handle, Declared::Runtime(Typed::Plain(expr))); named_expressions.insert(expr, (arg.name.name.to_string(), arg.name.span)); local_expression_kind_tracker.insert(expr, crate::proc::ExpressionKind::Runtime); @@ -1268,7 +1396,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { } block.extend(emitter.finish(&ctx.function.expressions)); - ctx.local_table.insert(l.handle, Typed::Plain(value)); + ctx.local_table + .insert(l.handle, Declared::Runtime(Typed::Plain(value))); ctx.named_expressions .insert(value, (l.name.name.to_string(), l.name.span)); @@ -1350,7 +1479,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { Span::UNDEFINED, )?; block.extend(emitter.finish(&ctx.function.expressions)); - ctx.local_table.insert(v.handle, Typed::Reference(handle)); + ctx.local_table + .insert(v.handle, Declared::Runtime(Typed::Reference(handle))); match initializer { 
Some(initializer) => crate::Statement::Store { @@ -1360,6 +1490,41 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { None => return Ok(()), } } + ast::LocalDecl::Const(ref c) => { + let mut emitter = Emitter::default(); + emitter.start(&ctx.function.expressions); + + let ectx = &mut ctx.as_const(block, &mut emitter); + + let mut init = self.expression_for_abstract(c.init, ectx)?; + + if let Some(explicit_ty) = c.ty { + let explicit_ty = + self.resolve_ast_type(explicit_ty, &mut ectx.as_global())?; + let explicit_ty_res = crate::proc::TypeResolution::Handle(explicit_ty); + init = ectx + .try_automatic_conversions(init, &explicit_ty_res, c.name.span) + .map_err(|error| match error { + Error::AutoConversion(error) => Error::InitializationTypeMismatch { + name: c.name.span, + expected: error.dest_type, + got: error.source_type, + }, + other => other, + })?; + } else { + init = ectx.concretize(init)?; + ectx.register_type(init)?; + } + + block.extend(emitter.finish(&ctx.function.expressions)); + ctx.local_table + .insert(c.handle, Declared::Const(Typed::Plain(init))); + ctx.named_expressions + .insert(init, (c.name.name.to_string(), c.name.span)); + + return Ok(()); + } }, ast::StatementKind::If { condition, @@ -1591,6 +1756,28 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { value, } } + ast::StatementKind::ConstAssert(condition) => { + let mut emitter = Emitter::default(); + emitter.start(&ctx.function.expressions); + + let condition = + self.expression(condition, &mut ctx.as_const(block, &mut emitter))?; + + let span = ctx.function.expressions.get_span(condition); + match ctx + .module + .to_ctx() + .eval_expr_to_bool_from(condition, &ctx.function.expressions) + { + Some(true) => Ok(()), + Some(false) => Err(Error::ConstAssertFailed(span)), + _ => Err(Error::NotBool(span)), + }?; + + block.extend(emitter.finish(&ctx.function.expressions)); + + return Ok(()); + } ast::StatementKind::Ignore(expr) => { let mut emitter = Emitter::default(); 
emitter.start(&ctx.function.expressions); @@ -1658,8 +1845,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { return Ok(Typed::Plain(handle)); } ast::Expression::Ident(ast::IdentExpr::Local(local)) => { - let rctx = ctx.runtime_expression_ctx(span)?; - return Ok(rctx.local_table[&local]); + return ctx.local(&local, span); } ast::Expression::Ident(ast::IdentExpr::Unresolved(name)) => { let global = ctx @@ -2830,16 +3016,34 @@ impl<'source, 'temp> Lowerer<'source, 'temp> { ) -> Result, Error<'source>> { let inner = match ctx.types[handle] { ast::Type::Scalar(scalar) => scalar.to_inner_scalar(), - ast::Type::Vector { size, scalar } => scalar.to_inner_vector(size), + ast::Type::Vector { size, ty, ty_span } => { + let ty = self.resolve_ast_type(ty, ctx)?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + crate::TypeInner::Vector { size, scalar } + } ast::Type::Matrix { rows, columns, - width, - } => crate::TypeInner::Matrix { - columns, - rows, - scalar: crate::Scalar::float(width), - }, + ty, + ty_span, + } => { + let ty = self.resolve_ast_type(ty, ctx)?; + let scalar = match ctx.module.types[ty].inner { + crate::TypeInner::Scalar(sc) => sc, + _ => return Err(Error::UnknownScalarType(ty_span)), + }; + match scalar.kind { + crate::ScalarKind::Float => crate::TypeInner::Matrix { + columns, + rows, + scalar, + }, + _ => return Err(Error::BadMatrixScalarKind(ty_span, scalar)), + } + } ast::Type::Atomic(scalar) => scalar.to_inner_atomic(), ast::Type::Pointer { base, space } => { let base = self.resolve_ast_type(base, ctx)?; diff --git a/third_party/rust/naga/src/front/wgsl/parse/ast.rs b/third_party/rust/naga/src/front/wgsl/parse/ast.rs index 7df5c8a1c9789..c4a7984115409 100644 --- a/third_party/rust/naga/src/front/wgsl/parse/ast.rs +++ b/third_party/rust/naga/src/front/wgsl/parse/ast.rs @@ -85,6 +85,7 @@ pub enum GlobalDeclKind<'a> { Override(Override<'a>), 
Struct(Struct<'a>), Type(TypeAlias<'a>), + ConstAssert(Handle>), } #[derive(Debug)] @@ -109,7 +110,7 @@ pub struct EntryPoint<'a> { } #[cfg(doc)] -use crate::front::wgsl::lower::{RuntimeExpressionContext, StatementContext}; +use crate::front::wgsl::lower::{LocalExpressionContext, StatementContext}; #[derive(Debug)] pub struct Function<'a> { @@ -142,7 +143,7 @@ pub struct GlobalVariable<'a> { pub name: Ident<'a>, pub space: crate::AddressSpace, pub binding: Option>, - pub ty: Handle>, + pub ty: Option>>, pub init: Option>>, } @@ -198,12 +199,14 @@ pub enum Type<'a> { Scalar(Scalar), Vector { size: crate::VectorSize, - scalar: Scalar, + ty: Handle>, + ty_span: Span, }, Matrix { columns: crate::VectorSize, rows: crate::VectorSize, - width: crate::Bytes, + ty: Handle>, + ty_span: Span, }, Atomic(Scalar), Pointer { @@ -282,6 +285,7 @@ pub enum StatementKind<'a> { Increment(Handle>), Decrement(Handle>), Ignore(Handle>), + ConstAssert(Handle>), } #[derive(Debug)] @@ -330,7 +334,8 @@ pub enum ConstructorType<'a> { /// `vec3(1.0)`. Vector { size: crate::VectorSize, - scalar: Scalar, + ty: Handle>, + ty_span: Span, }, /// A matrix construction whose component type is inferred from the @@ -345,7 +350,8 @@ pub enum ConstructorType<'a> { Matrix { columns: crate::VectorSize, rows: crate::VectorSize, - width: crate::Bytes, + ty: Handle>, + ty_span: Span, }, /// An array whose component type and size are inferred from the arguments: @@ -460,14 +466,23 @@ pub struct Let<'a> { pub handle: Handle, } +#[derive(Debug)] +pub struct LocalConst<'a> { + pub name: Ident<'a>, + pub ty: Option>>, + pub init: Handle>, + pub handle: Handle, +} + #[derive(Debug)] pub enum LocalDecl<'a> { Var(LocalVariable<'a>), Let(Let<'a>), + Const(LocalConst<'a>), } #[derive(Debug)] /// A placeholder for a local variable declaration. /// -/// See [`Function::locals`] for more information. +/// See [`super::ExpressionContext::locals`] for more information. 
pub struct Local; diff --git a/third_party/rust/naga/src/front/wgsl/parse/conv.rs b/third_party/rust/naga/src/front/wgsl/parse/conv.rs index 4718b85e5e7fc..3ba71b07ccc5c 100644 --- a/third_party/rust/naga/src/front/wgsl/parse/conv.rs +++ b/third_party/rust/naga/src/front/wgsl/parse/conv.rs @@ -58,6 +58,8 @@ pub fn map_sampling(word: &str, span: Span) -> Result "center" => Ok(crate::Sampling::Center), "centroid" => Ok(crate::Sampling::Centroid), "sample" => Ok(crate::Sampling::Sample), + "first" => Ok(crate::Sampling::First), + "either" => Ok(crate::Sampling::Either), _ => Err(Error::UnknownAttribute(span)), } } @@ -92,7 +94,7 @@ pub fn map_storage_format(word: &str, span: Span) -> Result Sf::Rgba8Sint, "rgb10a2uint" => Sf::Rgb10a2Uint, "rgb10a2unorm" => Sf::Rgb10a2Unorm, - "rg11b10float" => Sf::Rg11b10UFloat, + "rg11b10float" => Sf::Rg11b10Ufloat, "rg32uint" => Sf::Rg32Uint, "rg32sint" => Sf::Rg32Sint, "rg32float" => Sf::Rg32Float, diff --git a/third_party/rust/naga/src/front/wgsl/parse/mod.rs b/third_party/rust/naga/src/front/wgsl/parse/mod.rs index c9114d685ddb1..0157aa3a781a5 100644 --- a/third_party/rust/naga/src/front/wgsl/parse/mod.rs +++ b/third_party/rust/naga/src/front/wgsl/parse/mod.rs @@ -31,20 +31,20 @@ struct ExpressionContext<'input, 'temp, 'out> { /// A map from identifiers in scope to the locals/arguments they represent. /// - /// The handles refer to the [`Function::locals`] area; see that field's + /// The handles refer to the [`locals`] arena; see that field's /// documentation for details. /// - /// [`Function::locals`]: ast::Function::locals + /// [`locals`]: ExpressionContext::locals local_table: &'temp mut SymbolTable<&'input str, Handle>, /// Local variable and function argument arena for the function we're building. /// - /// Note that the `Local` here is actually a zero-sized type. The AST keeps - /// all the detailed information about locals - names, types, etc. - in - /// [`LocalDecl`] statements. 
For arguments, that information is kept in - /// [`arguments`]. This `Arena`'s only role is to assign a unique `Handle` - /// to each of them, and track their definitions' spans for use in - /// diagnostics. + /// Note that the [`ast::Local`] here is actually a zero-sized type. This + /// `Arena`'s only role is to assign a unique `Handle` to each local + /// identifier, and track its definition's span for use in diagnostics. All + /// the detailed information about locals - names, types, etc. - is kept in + /// the [`LocalDecl`] statements we parsed from their declarations. For + /// arguments, that information is kept in [`arguments`]. /// /// In the AST, when an [`Ident`] expression refers to a local variable or /// argument, its [`IdentExpr`] holds the referent's `Handle` in this @@ -53,14 +53,15 @@ struct ExpressionContext<'input, 'temp, 'out> { /// During lowering, [`LocalDecl`] statements add entries to a per-function /// table that maps `Handle` values to their Naga representations, /// accessed via [`StatementContext::local_table`] and - /// [`RuntimeExpressionContext::local_table`]. This table is then consulted when + /// [`LocalExpressionContext::local_table`]. This table is then consulted when /// lowering subsequent [`Ident`] expressions. 
/// - /// [`LocalDecl`]: StatementKind::LocalDecl - /// [`arguments`]: Function::arguments - /// [`Ident`]: Expression::Ident - /// [`StatementContext::local_table`]: StatementContext::local_table - /// [`RuntimeExpressionContext::local_table`]: RuntimeExpressionContext::local_table + /// [`LocalDecl`]: ast::StatementKind::LocalDecl + /// [`arguments`]: ast::Function::arguments + /// [`Ident`]: ast::Expression::Ident + /// [`IdentExpr`]: ast::IdentExpr + /// [`StatementContext::local_table`]: super::lower::StatementContext::local_table + /// [`LocalExpressionContext::local_table`]: super::lower::LocalExpressionContext::local_table locals: &'out mut Arena, /// Identifiers used by the current global declaration that have no local definition. @@ -111,6 +112,11 @@ impl<'a> ExpressionContext<'a, '_, '_> { Ok(handle) } } + + fn new_scalar(&mut self, scalar: Scalar) -> Handle> { + self.types + .append(ast::Type::Scalar(scalar), Span::UNDEFINED) + } } /// Which grammar rule we are in the midst of parsing. 
@@ -310,25 +316,22 @@ impl Parser { "vec2i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec2u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec2f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Bi, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "vec3" => ast::ConstructorType::PartialVector { @@ -337,19 +340,22 @@ impl Parser { "vec3i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::I32, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec3u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::U32, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec3f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "vec4" => ast::ConstructorType::PartialVector { @@ -358,19 +364,22 @@ impl Parser { "vec4i" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::I32, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, })) } "vec4u" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::U32, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, })) } "vec4f" => { return Ok(Some(ast::ConstructorType::Vector { size: crate::VectorSize::Quad, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x2" => ast::ConstructorType::PartialMatrix { @@ -381,7 +390,8 @@ impl 
Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x3" => ast::ConstructorType::PartialMatrix { @@ -392,7 +402,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat2x4" => ast::ConstructorType::PartialMatrix { @@ -403,7 +414,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x2" => ast::ConstructorType::PartialMatrix { @@ -414,7 +426,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x3" => ast::ConstructorType::PartialMatrix { @@ -425,7 +438,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat3x4" => ast::ConstructorType::PartialMatrix { @@ -436,7 +450,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x2" => ast::ConstructorType::PartialMatrix { @@ -447,7 +462,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x3" => ast::ConstructorType::PartialMatrix { @@ -458,7 +474,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: 
crate::VectorSize::Quad, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "mat4x4" => ast::ConstructorType::PartialMatrix { @@ -469,7 +486,8 @@ impl Parser { return Ok(Some(ast::ConstructorType::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, })) } "array" => ast::ConstructorType::PartialArray, @@ -502,19 +520,17 @@ impl Parser { // parse component type if present match (lexer.peek().0, partial) { (Token::Paren('<'), ast::ConstructorType::PartialVector { size }) => { - let scalar = lexer.next_scalar_generic()?; - Ok(Some(ast::ConstructorType::Vector { size, scalar })) + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(Some(ast::ConstructorType::Vector { size, ty, ty_span })) } (Token::Paren('<'), ast::ConstructorType::PartialMatrix { columns, rows }) => { - let (scalar, span) = lexer.next_scalar_generic_with_span()?; - match scalar.kind { - crate::ScalarKind::Float => Ok(Some(ast::ConstructorType::Matrix { - columns, - rows, - width: scalar.width, - })), - _ => Err(Error::BadMatrixScalarKind(span, scalar)), - } + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(Some(ast::ConstructorType::Matrix { + columns, + rows, + ty, + ty_span, + })) } (Token::Paren('<'), ast::ConstructorType::PartialArray) => { lexer.expect_generic_paren('<')?; @@ -570,11 +586,7 @@ impl Parser { let expr = match name { // bitcast looks like a function call, but it's an operator and must be handled differently. 
"bitcast" => { - lexer.expect_generic_paren('<')?; - let start = lexer.start_byte_offset(); - let to = self.type_decl(lexer, ctx)?; - let span = lexer.span_from(start); - lexer.expect_generic_paren('>')?; + let (to, span) = self.singular_generic(lexer, ctx)?; lexer.open_arguments()?; let expr = self.general_expression(lexer, ctx)?; @@ -980,8 +992,12 @@ impl Parser { lexer.expect(Token::Paren('>'))?; } let name = lexer.next_ident()?; - lexer.expect(Token::Separator(':'))?; - let ty = self.type_decl(lexer, ctx)?; + + let ty = if lexer.skip(Token::Separator(':')) { + Some(self.type_decl(lexer, ctx)?) + } else { + None + }; let init = if lexer.skip(Token::Operation('=')) { let handle = self.general_expression(lexer, ctx)?; @@ -1058,21 +1074,34 @@ impl Parser { Ok(members) } - fn matrix_scalar_type<'a>( + /// Parses ``, returning T and span of T + fn singular_generic<'a>( + &mut self, + lexer: &mut Lexer<'a>, + ctx: &mut ExpressionContext<'a, '_, '_>, + ) -> Result<(Handle>, Span), Error<'a>> { + lexer.expect_generic_paren('<')?; + let start = lexer.start_byte_offset(); + let ty = self.type_decl(lexer, ctx)?; + let span = lexer.span_from(start); + lexer.expect_generic_paren('>')?; + Ok((ty, span)) + } + + fn matrix_with_type<'a>( &mut self, lexer: &mut Lexer<'a>, + ctx: &mut ExpressionContext<'a, '_, '_>, columns: crate::VectorSize, rows: crate::VectorSize, ) -> Result, Error<'a>> { - let (scalar, span) = lexer.next_scalar_generic_with_span()?; - match scalar.kind { - crate::ScalarKind::Float => Ok(ast::Type::Matrix { - columns, - rows, - width: scalar.width, - }), - _ => Err(Error::BadMatrixScalarKind(span, scalar)), - } + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; + Ok(ast::Type::Matrix { + columns, + rows, + ty, + ty_span, + }) } fn type_decl_impl<'a>( @@ -1087,151 +1116,154 @@ impl Parser { Ok(Some(match word { "vec2" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: 
crate::VectorSize::Bi, - scalar, + ty, + ty_span, } } "vec2i" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec2u" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec2f" => ast::Type::Vector { size: crate::VectorSize::Bi, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "vec3" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: crate::VectorSize::Tri, - scalar, + ty, + ty_span, } } "vec3i" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec3u" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec3f" => ast::Type::Vector { size: crate::VectorSize::Tri, - scalar: Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "vec4" => { - let scalar = lexer.next_scalar_generic()?; + let (ty, ty_span) = self.singular_generic(lexer, ctx)?; ast::Type::Vector { size: crate::VectorSize::Quad, - scalar, + ty, + ty_span, } } "vec4i" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: Scalar { - kind: crate::ScalarKind::Sint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::I32), + ty_span: Span::UNDEFINED, }, "vec4u" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: Scalar { - kind: crate::ScalarKind::Uint, - width: 4, - }, + ty: ctx.new_scalar(Scalar::U32), + ty_span: Span::UNDEFINED, }, "vec4f" => ast::Type::Vector { size: crate::VectorSize::Quad, - scalar: 
Scalar::F32, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Bi)? } "mat2x2f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Tri)? } "mat2x3f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat2x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Quad)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Bi, crate::VectorSize::Quad)? } "mat2x4f" => ast::Type::Matrix { columns: crate::VectorSize::Bi, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Bi)? } "mat3x2f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Tri)? } "mat3x3f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat3x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Quad)? 
+ self.matrix_with_type(lexer, ctx, crate::VectorSize::Tri, crate::VectorSize::Quad)? } "mat3x4f" => ast::Type::Matrix { columns: crate::VectorSize::Tri, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x2" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Bi)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Bi)? } "mat4x2f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Bi, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x3" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Tri)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Tri)? } "mat4x3f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Tri, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "mat4x4" => { - self.matrix_scalar_type(lexer, crate::VectorSize::Quad, crate::VectorSize::Quad)? + self.matrix_with_type(lexer, ctx, crate::VectorSize::Quad, crate::VectorSize::Quad)? 
} "mat4x4f" => ast::Type::Matrix { columns: crate::VectorSize::Quad, rows: crate::VectorSize::Quad, - width: 4, + ty: ctx.new_scalar(Scalar::F32), + ty_span: Span::UNDEFINED, }, "atomic" => { let scalar = lexer.next_scalar_generic()?; @@ -1688,6 +1720,28 @@ impl Parser { handle, })) } + "const" => { + let _ = lexer.next(); + let name = lexer.next_ident()?; + + let given_ty = if lexer.skip(Token::Separator(':')) { + let ty = self.type_decl(lexer, ctx)?; + Some(ty) + } else { + None + }; + lexer.expect(Token::Operation('='))?; + let expr_id = self.general_expression(lexer, ctx)?; + lexer.expect(Token::Separator(';'))?; + + let handle = ctx.declare_local(name)?; + ast::StatementKind::LocalDecl(ast::LocalDecl::Const(ast::LocalConst { + name, + ty: given_ty, + init: expr_id, + handle, + })) + } "var" => { let _ = lexer.next(); @@ -1963,6 +2017,20 @@ impl Parser { lexer.expect(Token::Separator(';'))?; ast::StatementKind::Kill } + // https://www.w3.org/TR/WGSL/#const-assert-statement + "const_assert" => { + let _ = lexer.next(); + // parentheses are optional + let paren = lexer.skip(Token::Paren('(')); + + let condition = self.general_expression(lexer, ctx)?; + + if paren { + lexer.expect(Token::Paren(')'))?; + } + lexer.expect(Token::Separator(';'))?; + ast::StatementKind::ConstAssert(condition) + } // assignment or a function call _ => { self.function_call_or_assignment_statement(lexer, ctx, block)?; @@ -2366,6 +2434,18 @@ impl Parser { ..function })) } + (Token::Word("const_assert"), _) => { + // parentheses are optional + let paren = lexer.skip(Token::Paren('(')); + + let condition = self.general_expression(lexer, &mut ctx)?; + + if paren { + lexer.expect(Token::Paren(')'))?; + } + lexer.expect(Token::Separator(';'))?; + Some(ast::GlobalDeclKind::ConstAssert(condition)) + } (Token::End, _) => return Ok(()), other => return Err(Error::Unexpected(other.1, ExpectedToken::GlobalItem)), }; diff --git a/third_party/rust/naga/src/front/wgsl/to_wgsl.rs 
b/third_party/rust/naga/src/front/wgsl/to_wgsl.rs index ec3af8edd4eb7..0884e0003bc0c 100644 --- a/third_party/rust/naga/src/front/wgsl/to_wgsl.rs +++ b/third_party/rust/naga/src/front/wgsl/to_wgsl.rs @@ -175,7 +175,7 @@ impl crate::StorageFormat { Sf::Bgra8Unorm => "bgra8unorm", Sf::Rgb10a2Uint => "rgb10a2uint", Sf::Rgb10a2Unorm => "rgb10a2unorm", - Sf::Rg11b10UFloat => "rg11b10float", + Sf::Rg11b10Ufloat => "rg11b10float", Sf::Rg32Uint => "rg32uint", Sf::Rg32Sint => "rg32sint", Sf::Rg32Float => "rg32float", diff --git a/third_party/rust/naga/src/lib.rs b/third_party/rust/naga/src/lib.rs index 60e5a1f47b1ce..85fd7a450834a 100644 --- a/third_party/rust/naga/src/lib.rs +++ b/third_party/rust/naga/src/lib.rs @@ -530,6 +530,13 @@ pub enum Sampling { /// Interpolate the value at each sample location. In multisampling, invoke /// the fragment shader once per sample. Sample, + + /// Use the value provided by the first vertex of the current primitive. + First, + + /// Use the value provided by the first or last vertex of the current primitive. The exact + /// choice is implementation-dependent. + Either, } /// Member of a user-defined structure. 
@@ -615,7 +622,7 @@ pub enum StorageFormat { // Packed 32-bit formats Rgb10a2Uint, Rgb10a2Unorm, - Rg11b10UFloat, + Rg11b10Ufloat, // 64-bit formats Rg32Uint, diff --git a/third_party/rust/naga/src/proc/constant_evaluator.rs b/third_party/rust/naga/src/proc/constant_evaluator.rs index deaa9c93c7e76..1b7f5cf9104ec 100644 --- a/third_party/rust/naga/src/proc/constant_evaluator.rs +++ b/third_party/rust/naga/src/proc/constant_evaluator.rs @@ -317,7 +317,7 @@ pub struct ConstantEvaluator<'a> { #[derive(Debug)] enum WgslRestrictions<'a> { /// - const-expressions will be evaluated and inserted in the arena - Const, + Const(Option>), /// - const-expressions will be evaluated and inserted in the arena /// - override-expressions will be inserted in the arena Override, @@ -347,6 +347,8 @@ struct FunctionLocalData<'a> { #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] pub enum ExpressionKind { + /// If const is also implemented as const + ImplConst, Const, Override, Runtime, @@ -372,14 +374,23 @@ impl ExpressionKindTracker { pub fn insert(&mut self, value: Handle, expr_type: ExpressionKind) { self.inner.insert(value, expr_type); } + pub fn is_const(&self, h: Handle) -> bool { - matches!(self.type_of(h), ExpressionKind::Const) + matches!( + self.type_of(h), + ExpressionKind::Const | ExpressionKind::ImplConst + ) + } + + /// Returns `true` if naga can also evaluate expression as const + pub fn is_impl_const(&self, h: Handle) -> bool { + matches!(self.type_of(h), ExpressionKind::ImplConst) } pub fn is_const_or_override(&self, h: Handle) -> bool { matches!( self.type_of(h), - ExpressionKind::Const | ExpressionKind::Override + ExpressionKind::Const | ExpressionKind::Override | ExpressionKind::ImplConst ) } @@ -400,13 +411,14 @@ impl ExpressionKindTracker { } fn type_of_with_expr(&self, expr: &Expression) -> ExpressionKind { + use crate::MathFunction as Mf; match *expr { Expression::Literal(_) | Expression::ZeroValue(_) | Expression::Constant(_) => { - 
ExpressionKind::Const + ExpressionKind::ImplConst } Expression::Override(_) => ExpressionKind::Override, Expression::Compose { ref components, .. } => { - let mut expr_type = ExpressionKind::Const; + let mut expr_type = ExpressionKind::ImplConst; for component in components { expr_type = expr_type.max(self.type_of(*component)) } @@ -417,13 +429,16 @@ impl ExpressionKindTracker { Expression::Access { base, index } => self.type_of(base).max(self.type_of(index)), Expression::Swizzle { vector, .. } => self.type_of(vector), Expression::Unary { expr, .. } => self.type_of(expr), - Expression::Binary { left, right, .. } => self.type_of(left).max(self.type_of(right)), + Expression::Binary { left, right, .. } => self + .type_of(left) + .max(self.type_of(right)) + .max(ExpressionKind::Const), Expression::Math { + fun, arg, arg1, arg2, arg3, - .. } => self .type_of(arg) .max( @@ -437,8 +452,34 @@ impl ExpressionKindTracker { .max( arg3.map(|arg| self.type_of(arg)) .unwrap_or(ExpressionKind::Const), + ) + .max( + if matches!( + fun, + Mf::Dot + | Mf::Outer + | Mf::Cross + | Mf::Distance + | Mf::Length + | Mf::Normalize + | Mf::FaceForward + | Mf::Reflect + | Mf::Refract + | Mf::Ldexp + | Mf::Modf + | Mf::Mix + | Mf::Frexp + ) { + ExpressionKind::Const + } else { + ExpressionKind::ImplConst + }, ), - Expression::As { expr, .. } => self.type_of(expr), + Expression::As { convert, expr, .. } => self.type_of(expr).max(if convert.is_some() { + ExpressionKind::ImplConst + } else { + ExpressionKind::Const + }), Expression::Select { condition, accept, @@ -446,7 +487,8 @@ impl ExpressionKindTracker { } => self .type_of(condition) .max(self.type_of(accept)) - .max(self.type_of(reject)), + .max(self.type_of(reject)) + .max(ExpressionKind::Const), Expression::Relational { argument, .. 
} => self.type_of(argument), Expression::ArrayLength(expr) => self.type_of(expr), _ => ExpressionKind::Runtime, @@ -556,7 +598,7 @@ impl<'a> ConstantEvaluator<'a> { Behavior::Wgsl(if in_override_ctx { WgslRestrictions::Override } else { - WgslRestrictions::Const + WgslRestrictions::Const(None) }), module, global_expression_kind_tracker, @@ -603,13 +645,19 @@ impl<'a> ConstantEvaluator<'a> { local_expression_kind_tracker: &'a mut ExpressionKindTracker, emitter: &'a mut super::Emitter, block: &'a mut crate::Block, + is_const: bool, ) -> Self { + let local_data = FunctionLocalData { + global_expressions: &module.global_expressions, + emitter, + block, + }; Self { - behavior: Behavior::Wgsl(WgslRestrictions::Runtime(FunctionLocalData { - global_expressions: &module.global_expressions, - emitter, - block, - })), + behavior: Behavior::Wgsl(if is_const { + WgslRestrictions::Const(Some(local_data)) + } else { + WgslRestrictions::Runtime(local_data) + }), types: &mut module.types, constants: &module.constants, overrides: &module.overrides, @@ -718,6 +766,7 @@ impl<'a> ConstantEvaluator<'a> { span: Span, ) -> Result, ConstantEvaluatorError> { match self.expression_kind_tracker.type_of_with_expr(&expr) { + ExpressionKind::ImplConst => self.try_eval_and_append_impl(&expr, span), ExpressionKind::Const => { let eval_result = self.try_eval_and_append_impl(&expr, span); // We should be able to evaluate `Const` expressions at this @@ -740,7 +789,7 @@ impl<'a> ConstantEvaluator<'a> { Behavior::Wgsl(WgslRestrictions::Override | WgslRestrictions::Runtime(_)) => { Ok(self.append_expr(expr, span, ExpressionKind::Override)) } - Behavior::Wgsl(WgslRestrictions::Const) => { + Behavior::Wgsl(WgslRestrictions::Const(_)) => { Err(ConstantEvaluatorError::OverrideExpr) } Behavior::Glsl(_) => { @@ -761,14 +810,17 @@ impl<'a> ConstantEvaluator<'a> { const fn is_global_arena(&self) -> bool { matches!( self.behavior, - Behavior::Wgsl(WgslRestrictions::Const | WgslRestrictions::Override) + 
Behavior::Wgsl(WgslRestrictions::Const(None) | WgslRestrictions::Override) | Behavior::Glsl(GlslRestrictions::Const) ) } const fn function_local_data(&self) -> Option<&FunctionLocalData<'a>> { match self.behavior { - Behavior::Wgsl(WgslRestrictions::Runtime(ref function_local_data)) + Behavior::Wgsl( + WgslRestrictions::Runtime(ref function_local_data) + | WgslRestrictions::Const(Some(ref function_local_data)), + ) | Behavior::Glsl(GlslRestrictions::Runtime(ref function_local_data)) => { Some(function_local_data) } @@ -1779,9 +1831,13 @@ impl<'a> ConstantEvaluator<'a> { _ => return Err(ConstantEvaluatorError::InvalidBinaryOpArgs), }), (Literal::I32(a), Literal::U32(b)) => Literal::I32(match op { - BinaryOperator::ShiftLeft => a - .checked_shl(b) - .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + BinaryOperator::ShiftLeft => { + if (if a.is_negative() { !a } else { a }).leading_zeros() <= b { + return Err(ConstantEvaluatorError::Overflow("<<".to_string())); + } + a.checked_shl(b) + .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)? 
+ } BinaryOperator::ShiftRight => a .checked_shr(b) .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, @@ -1807,8 +1863,11 @@ impl<'a> ConstantEvaluator<'a> { BinaryOperator::ExclusiveOr => a ^ b, BinaryOperator::InclusiveOr => a | b, BinaryOperator::ShiftLeft => a - .checked_shl(b) - .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + .checked_mul( + 1u32.checked_shl(b) + .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, + ) + .ok_or(ConstantEvaluatorError::Overflow("<<".to_string()))?, BinaryOperator::ShiftRight => a .checked_shr(b) .ok_or(ConstantEvaluatorError::ShiftedMoreThan32Bits)?, @@ -2057,7 +2116,10 @@ impl<'a> ConstantEvaluator<'a> { expr_type: ExpressionKind, ) -> Handle { let h = match self.behavior { - Behavior::Wgsl(WgslRestrictions::Runtime(ref mut function_local_data)) + Behavior::Wgsl( + WgslRestrictions::Runtime(ref mut function_local_data) + | WgslRestrictions::Const(Some(ref mut function_local_data)), + ) | Behavior::Glsl(GlslRestrictions::Runtime(ref mut function_local_data)) => { let is_running = function_local_data.emitter.is_running(); let needs_pre_emit = expr.needs_pre_emit(); @@ -2480,7 +2542,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2566,7 +2628,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2684,7 +2746,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: 
Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2777,7 +2839,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, @@ -2859,7 +2921,7 @@ mod tests { let expression_kind_tracker = &mut ExpressionKindTracker::from_arena(&global_expressions); let mut solver = ConstantEvaluator { - behavior: Behavior::Wgsl(WgslRestrictions::Const), + behavior: Behavior::Wgsl(WgslRestrictions::Const(None)), types: &mut types, constants: &constants, overrides: &overrides, diff --git a/third_party/rust/naga/src/proc/mod.rs b/third_party/rust/naga/src/proc/mod.rs index 642c0166159e8..a5b3ea4e381d4 100644 --- a/third_party/rust/naga/src/proc/mod.rs +++ b/third_party/rust/naga/src/proc/mod.rs @@ -48,7 +48,7 @@ impl From for super::ScalarKind { Sf::Bgra8Unorm => Sk::Float, Sf::Rgb10a2Uint => Sk::Uint, Sf::Rgb10a2Unorm => Sk::Float, - Sf::Rg11b10UFloat => Sk::Float, + Sf::Rg11b10Ufloat => Sk::Float, Sf::Rg32Uint => Sk::Uint, Sf::Rg32Sint => Sk::Sint, Sf::Rg32Float => Sk::Float, @@ -674,6 +674,19 @@ impl GlobalCtx<'_> { } } + /// Try to evaluate the expression in the `arena` using its `handle` and return it as a `bool`. 
+ #[allow(dead_code)] + pub(super) fn eval_expr_to_bool_from( + &self, + handle: crate::Handle, + arena: &crate::Arena, + ) -> Option { + match self.eval_expr_to_literal_from(handle, arena) { + Some(crate::Literal::Bool(value)) => Some(value), + _ => None, + } + } + #[allow(dead_code)] pub(crate) fn eval_expr_to_literal( &self, diff --git a/third_party/rust/naga/src/valid/interface.rs b/third_party/rust/naga/src/valid/interface.rs index 7fce9c8fd9525..150a1f9df5e01 100644 --- a/third_party/rust/naga/src/valid/interface.rs +++ b/third_party/rust/naga/src/valid/interface.rs @@ -50,6 +50,11 @@ pub enum VaryingError { NotIOShareableType(Handle), #[error("Interpolation is not valid")] InvalidInterpolation, + #[error("Cannot combine {interpolation:?} interpolation with the {sampling:?} sample type")] + InvalidInterpolationSamplingCombination { + interpolation: crate::Interpolation, + sampling: crate::Sampling, + }, #[error("Interpolation must be specified on vertex shader outputs and fragment shader inputs")] MissingInterpolation, #[error("Built-in {0:?} is not available at this stage")] @@ -339,6 +344,31 @@ impl VaryingContext<'_> { } } + if let Some(interpolation) = interpolation { + let invalid_sampling = match (interpolation, sampling) { + (_, None) + | ( + crate::Interpolation::Perspective | crate::Interpolation::Linear, + Some( + crate::Sampling::Center + | crate::Sampling::Centroid + | crate::Sampling::Sample, + ), + ) + | ( + crate::Interpolation::Flat, + Some(crate::Sampling::First | crate::Sampling::Either), + ) => None, + (_, Some(invalid_sampling)) => Some(invalid_sampling), + }; + if let Some(sampling) = invalid_sampling { + return Err(VaryingError::InvalidInterpolationSamplingCombination { + interpolation, + sampling, + }); + } + } + let needs_interpolation = match self.stage { crate::ShaderStage::Vertex => self.output, crate::ShaderStage::Fragment => !self.output, diff --git a/third_party/rust/wgpu-core/.cargo-checksum.json 
b/third_party/rust/wgpu-core/.cargo-checksum.json index 06c9045cd274a..0555549c6ba37 100644 --- a/third_party/rust/wgpu-core/.cargo-checksum.json +++ b/third_party/rust/wgpu-core/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"a2dc0c9c9bf9f945ccb880978880a817b7632fe603c7c0b4908a28490c645f89","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/binding_model.rs":"6d3b0babc8a09218ef8cbb43f6a5e9d273ceb6fffc86e71fd2d6a6ad39b19655","src/command/allocator.rs":"882d780e5d58b7a60370b58fffd7f35ffd4c087cf2ff2536267fcd57071b5f68","src/command/bind.rs":"e6a50b50daf7d75a51498b37269d9fc2cb0174c7fb91a5292e68bfd531f13e65","src/command/bundle.rs":"1ee0f49da1f45e3514491842b36966de18606426b16838e6a52048b2491fc17d","src/command/clear.rs":"af1667085d1f83d70206db90f1dca9935a9aa7c2dda5945a7bf34695e4f73260","src/command/compute.rs":"3eecbca9f169ecbdacbc80ac6624868662d71f0f16e6cbceac5f9a9042a80044","src/command/compute_command.rs":"9ef10ab26d2a457ea1a53ac5af1c69905e1a39e5f0b14f9df427a86c03192bcf","src/command/draw.rs":"1f028fd72e17e3c48b5854ee635167198829d78b5ad1bea5130d8e9708a3921e","src/command/memory_init.rs":"fadd17881d35ea5b2918de48ae9d631e16d6c49aba744d334e3edee64c0e839d","src/command/mod.rs":"dcd79aca340054a3028e625849b7fdcea1545029a2579c9e347f1de38f659640","src/command/query.rs":"93c0285cebee0cd301fcd7541ba48fc56ad7c745dff6393271d97235a845d5da","src/command/render.rs":"f11ae17ef1d6e6a0e1636972a6a6b282cda9242c7543fc83d81785b4ba11c7b5","src/command/render_command.rs":"2becc9aea7a2fdfca79562ba0369ce83b59ed9e79fa923e73e1356f91410f088","src/command/timestamp_writes.rs":"38d3a48ec70043c0cbf4113a8751a3bf267c31c3c8328dcfd9d86bacbc800dc3","src/command/transfer.rs":"b3a971ae18d34cf2cc29af2c9c4db1b4a0bcbd98b1155c3dbb47ad071fd188b0","src/conv.rs":"9bbcd0660741a808d29788266bbac34af54c58e8db8ed8
56082f8d2d1dfb792d","src/device/bgl.rs":"1198f47a486d4cad4c03b49c98d690bac039a7b370cc241a5f279c6767e0b010","src/device/global.rs":"3b51c5c3f926e36f2099529acd0569309b6901936c967dfce4c776f94bef7f6b","src/device/life.rs":"5a6267bb36278c806905919cc016569bc13d1d7ab4afef10366890a32dd0fe79","src/device/mod.rs":"a2b1bae5c839d924e4555bd91aa29621d09ab9384ea718d014a301a275ad3658","src/device/queue.rs":"e04c4090ebdecce8a2e1287ef469425ceec79c7db4cc6463306be25fc92ea1e0","src/device/resource.rs":"fbf38af8fe49e119a41e76f48623473f2ae83a6250b3c38ebf8a3a173faa1861","src/device/trace.rs":"0e38395e0f41b763975d94d0b538ccbb926a3d784d4bfc78048884789c5b9d09","src/error.rs":"f509aa8a57c04e1aea9ba1b4a1589d007666018cd619db3f822c37082a76b871","src/global.rs":"601a8160b5b7afa44a633ced0e87e73cb8fa298c8ff3e29b6dfe5f50c83df04f","src/hal_api.rs":"29377c036d3eaaa05d83b87644a9a33916824eb51aa2d005704cde97e77925cb","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"d357d4d52ca62211ab80e57c4781489add8ee52ff684a81aa616a6d35e444c1c","src/id.rs":"36f578a7f1c56d1ae8d5f6c7f2ab3594c26b69ca050b1fa727e2ab1dc2465c1e","src/identity.rs":"b27d4e06b1f6c25ca2f4ad276fdf8c09a5d71f890b06cbabef7ef62369cb4df6","src/init_tracker/buffer.rs":"85e7c942b0acc1b2f724bb63ca015389c8a9a853198c37108a162326a2b5e70b","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"8e69fa256bf7cf7ce7bcbe28991e12990bf199ddb359b7a2bd40eee950a0fea9","src/instance.rs":"bde36d69f73b055b58edcad8da07d98debbcf918863eaee42633f63678af58ac","src/lib.rs":"f23f4b83d75846ecbcecd894ca430d01c13b065193cbc74a0261599c0f7cc1ec","src/lock/mod.rs":"c58ae08a8e6108432d9e14e7d8b7bd15af7bb5e98bf700d6bd9a469b80a38b11","src/lock/observing.rs":"0ef28b269d2ecbd3c46e25e5921b657a2e3d35622c154b2850690dd4d117d47e","src/lock/rank.rs":"e6eebdb0fe0bf5e6ef7f8b522b1d83957643d244f84e1523a1e96f8f0a8d8126","src/lock/ranked.rs":"bafe6f295b4cade449cc41491c0dcd1497a6e04c5d9
02124040ac2056996a44d","src/lock/vanilla.rs":"a6cd5cc3c1900271783ba146adf07e28424f6fecc5466d54624dbda237770fb4","src/pipeline.rs":"1f530314b3c19d06cfdbcafb8627d8be7267584ce2ee4382590bf19fa7e90367","src/pipeline_cache.rs":"dbb6b93562a15c9b27acc006545449a6c033280433c1950806243fcfe0850c3e","src/pool.rs":"7899e74874da17c83ec39a88b360de12f564ef4bee2bb1240c37becdaeb57a86","src/present.rs":"4de7942bcee2fea830a7d7101bf59c2a1fb41b835a3963e9a56b8a0b843cbc02","src/registry.rs":"b9cd3ec0c1bda659f91e78c372ec0f58786caf24e5f2ff62f49ca5b7e07f097d","src/resource.rs":"6602249ed3ce9d04ffd7eb64aacf8741ace436665d50031772c54ba87f97ec99","src/snatch.rs":"c16e3d21547198b4b7d22a10d115376d2d6f5fa0a62d51ced2dd6a4f934d7ef7","src/storage.rs":"531eeda868cdfef205d48d0c9d3d7f3f0d22968491b6c4f15d0510b9faf86ff1","src/track/buffer.rs":"dc671db7ab4b8378af0e4a0f7dab181d86708922f942e3b01f7fc5d48507c509","src/track/metadata.rs":"44b3a8361780e8c8b2bd8910355e0ce89e2b04d921a97da4b1857a599bd6f5a4","src/track/mod.rs":"7e80349251c0874ed1d50847beee61e193ae706b9161405061e35c26d389c1f2","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"e404d58dbe2644bed4a9fcdc2c01a699322d669aaa4e6619750347519fb56625","src/track/texture.rs":"33959d21380105de4a2bafa70d323281263a2b21a5464bc5b239d46e8246e88a","src/validation.rs":"7709f256515812e7f0261dc4e3cd36f006e914084005affd795dda2c05c2ed50"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"d2728ae8f45002282cafb4e751a240826714c5c9e5349084838e6419d3fff093","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/binding_model.rs":"3bb83ac752c61fe14984502d79947604e16d5105aacd8da3ab23ad8ce2c4fada","src/command/allocator.rs":"882d780e5d58b7a60370b58fffd7f35ffd4c087cf2ff2536267fcd57071b5f68","src/command/bind.rs":"e6a50b50daf7d75a51498b37269d9fc2cb0174c7fb91a5292e68bfd531f13e65","src/command/bundle.rs":"32651b8f5507158ed0920e016f0b83fafe3a96fa498ee203879dee200a40eb0f","src/command/clear.rs":"adaa0701e6d9ad3204ccc330246a71f7e4b110da7d186c0c926e92220cc84a70","src/command/compute.rs":"18f06a110c0ce050e70553dfcf82d9878b45d355869172deef3c6080a2d4c14c","src/command/compute_command.rs":"7befc7eb479c150f71510730c16e737bb390cbf5d2d58b0a44327ad4375938e9","src/command/draw.rs":"2f9e1d08f77425baa4680ea8b8e023321f8ac3a986f78299f8cc8bc083353fc1","src/command/memory_init.rs":"fadd17881d35ea5b2918de48ae9d631e16d6c49aba744d334e3edee64c0e839d","src/command/mod.rs":"c007a7f94a9ba94233b6d0721b3c3655a6e6c2cb9192ca20a408a0c3d5af788d","src/command/query.rs":"74ade834ad8098473a1d94259bfd98afb67b3d61cda591f3f5953153155e0717","src/command/render.rs":"2e6e240b8e906eb576504cae5728ed888359d1d5978559bf8fe9bd0b96fdc672","src/command/render_command.rs":"314a05272478511f0b62bdb3aa409284d1c7ce84a8715476feef7d058b782276","src/command/timestamp_writes.rs":"38d3a48ec70043c0cbf4113a8751a3bf267c31c3c8328dcfd9d86bacbc800dc3","src/command/transfer.rs":"0a263c7407a4fa05186bbb03686b6a866500c02b7394e1325ba9ad491ee8a050","src/conv.rs":"9bbcd0660741a808d29788266bbac34af54c58e8db8ed856082f8d2d1dfb792d","src/device/bgl.rs":"1198f47a486d4cad4c03b49c98d690bac039a7b370cc241a5f279c6767e0b010","src/device/global.rs":"86d2cfcf55c8e7ce24e000019c8ce0d6a4c4aaf64b692d5f3796a090da6d97e9","src/device/lif
e.rs":"5a6267bb36278c806905919cc016569bc13d1d7ab4afef10366890a32dd0fe79","src/device/mod.rs":"66faa8b2f2ec14cc88077647e60f6a3fe30dad2382be05f7e50e205642d85bc7","src/device/queue.rs":"14a7204f8789df92270eb2b3174d384786f52af65ba1957cf1b45d0ab761aa8e","src/device/resource.rs":"39a214b52f56d51a9bee30e1106c2ec79d588de8fac633170b885c8319e38433","src/device/trace.rs":"0e38395e0f41b763975d94d0b538ccbb926a3d784d4bfc78048884789c5b9d09","src/error.rs":"f509aa8a57c04e1aea9ba1b4a1589d007666018cd619db3f822c37082a76b871","src/global.rs":"75a984b96eb64bc2fa4027c6d0730a22761d91fe87882887459e8c58a2154b92","src/hal_api.rs":"29377c036d3eaaa05d83b87644a9a33916824eb51aa2d005704cde97e77925cb","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"adb40e580f58155985f77b43fa56e6fee9be8d9d715c21585c5a3e3a6b726eae","src/id.rs":"36f578a7f1c56d1ae8d5f6c7f2ab3594c26b69ca050b1fa727e2ab1dc2465c1e","src/identity.rs":"b27d4e06b1f6c25ca2f4ad276fdf8c09a5d71f890b06cbabef7ef62369cb4df6","src/init_tracker/buffer.rs":"85e7c942b0acc1b2f724bb63ca015389c8a9a853198c37108a162326a2b5e70b","src/init_tracker/mod.rs":"039f2c6a98b066f80fd28f1b01f84b5d42e56f92176ac63b22dd572b06e4804d","src/init_tracker/texture.rs":"8e69fa256bf7cf7ce7bcbe28991e12990bf199ddb359b7a2bd40eee950a0fea9","src/instance.rs":"a95d9f8b1dce291d0fd022f64049b1d178f1a8dba436507fe93575cfa87b445b","src/lib.rs":"f23f4b83d75846ecbcecd894ca430d01c13b065193cbc74a0261599c0f7cc1ec","src/lock/mod.rs":"c58ae08a8e6108432d9e14e7d8b7bd15af7bb5e98bf700d6bd9a469b80a38b11","src/lock/observing.rs":"50d7c675e64cc89a188c12163e1920e75fe64db1682e843fff778efd751aa037","src/lock/rank.rs":"e6eebdb0fe0bf5e6ef7f8b522b1d83957643d244f84e1523a1e96f8f0a8d8126","src/lock/ranked.rs":"bafe6f295b4cade449cc41491c0dcd1497a6e04c5d902124040ac2056996a44d","src/lock/vanilla.rs":"57581d4671e226438b55f74c4195baa0c4fe1d9f4911a4c026b667c77ab574ff","src/pipeline.rs":"7ccfc915125c6db19f83aa58de1db55032340d72d68cb421dec4b60eeecaf194","src/pipeline_c
ache.rs":"dbb6b93562a15c9b27acc006545449a6c033280433c1950806243fcfe0850c3e","src/pool.rs":"7899e74874da17c83ec39a88b360de12f564ef4bee2bb1240c37becdaeb57a86","src/present.rs":"074354ac5e86b22d4a46153f47db96ec5c2b387a56fb43ddae1dbba8c50acb0d","src/registry.rs":"66a1e52d1b58ed52ee853883d578f96b1c13799eec30675bd77c6eb4353d4f5b","src/resource.rs":"ec3777dd407935598d46139509c19e6554a3c95c320f142896b223411a5f4f24","src/snatch.rs":"c16e3d21547198b4b7d22a10d115376d2d6f5fa0a62d51ced2dd6a4f934d7ef7","src/storage.rs":"79d3d0e04a265123dfee453bfba7c9ab56a709a405767ffbcca6b33ee78b62ad","src/track/buffer.rs":"dc671db7ab4b8378af0e4a0f7dab181d86708922f942e3b01f7fc5d48507c509","src/track/metadata.rs":"44b3a8361780e8c8b2bd8910355e0ce89e2b04d921a97da4b1857a599bd6f5a4","src/track/mod.rs":"7e80349251c0874ed1d50847beee61e193ae706b9161405061e35c26d389c1f2","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"e404d58dbe2644bed4a9fcdc2c01a699322d669aaa4e6619750347519fb56625","src/track/texture.rs":"c5b6f03861907b8ad7dd304af6122b4f102bce6b4fa3e30e1b6f604872fb46fe","src/validation.rs":"6ea396fec9f934aaed00cee98b348e9b569187028fea2bf79f89b70ad8c35442"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-core/Cargo.toml b/third_party/rust/wgpu-core/Cargo.toml index ccf88c0a5a190..bef4bc5e2175d 100644 --- a/third_party/rust/wgpu-core/Cargo.toml +++ b/third_party/rust/wgpu-core/Cargo.toml @@ -57,7 +57,7 @@ version = "0.8" version = "2.6" [dependencies.bytemuck] -version = "1.17" +version = "1.18" features = ["derive"] optional = true diff --git a/third_party/rust/wgpu-core/src/binding_model.rs b/third_party/rust/wgpu-core/src/binding_model.rs index d8a8b32d2fa00..2357a8c77641b 100644 --- a/third_party/rust/wgpu-core/src/binding_model.rs +++ b/third_party/rust/wgpu-core/src/binding_model.rs @@ -6,8 +6,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction}, pipeline::{ComputePipeline, 
RenderPipeline}, resource::{ - Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, MissingTextureUsageError, - ResourceErrorIdent, Sampler, TextureView, TrackingData, + Buffer, DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + MissingTextureUsageError, ResourceErrorIdent, Sampler, TextureView, TrackingData, }, resource_log, snatch::{SnatchGuard, Snatchable}, @@ -79,14 +79,6 @@ pub enum CreateBindGroupLayoutError { pub enum CreateBindGroupError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Bind group layout is invalid")] - InvalidLayout, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureViewId {0:?} is invalid")] - InvalidTextureViewId(TextureViewId), - #[error("SamplerId {0:?} is invalid")] - InvalidSamplerId(SamplerId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error( @@ -188,6 +180,8 @@ pub enum CreateBindGroupError { StorageReadNotSupported(wgt::TextureFormat), #[error(transparent)] ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -545,8 +539,6 @@ impl BindGroupLayout { pub enum CreatePipelineLayoutError { #[error(transparent)] Device(#[from] DeviceError), - #[error("BindGroupLayoutId {0:?} is invalid")] - InvalidBindGroupLayoutId(BindGroupLayoutId), #[error( "Push constant at index {index} has range bound {bound} not aligned to {}", wgt::PUSH_CONSTANT_ALIGNMENT @@ -570,6 +562,8 @@ pub enum CreatePipelineLayoutError { TooManyBindings(BindingTypeMaxCountError), #[error("Bind group layout count {actual} exceeds device bind group limit {max}")] TooManyGroups { actual: usize, max: usize }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -884,6 +878,16 @@ pub(crate) fn buffer_binding_type_alignment( } } +pub(crate) fn 
buffer_binding_type_bounds_check_alignment( + alignments: &hal::Alignments, + binding_type: wgt::BufferBindingType, +) -> wgt::BufferAddress { + match binding_type { + wgt::BufferBindingType::Uniform => alignments.uniform_bounds_check_alignment.get(), + wgt::BufferBindingType::Storage { .. } => wgt::COPY_BUFFER_ALIGNMENT, + } +} + #[derive(Debug)] pub struct BindGroup { pub(crate) raw: Snatchable>, @@ -993,10 +997,10 @@ crate::impl_trackable!(BindGroup); #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetBindGroupLayoutError { - #[error("Pipeline is invalid")] - InvalidPipeline, #[error("Invalid group index {0}")] InvalidGroupIndex(u32), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error, Eq, PartialEq)] diff --git a/third_party/rust/wgpu-core/src/command/bundle.rs b/third_party/rust/wgpu-core/src/command/bundle.rs index a7a43e1e2e4a6..394858f9fde18 100644 --- a/third_party/rust/wgpu-core/src/command/bundle.rs +++ b/third_party/rust/wgpu-core/src/command/bundle.rs @@ -92,7 +92,10 @@ use crate::{ id, init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction}, pipeline::{PipelineFlags, RenderPipeline, VertexStep}, - resource::{Buffer, DestroyedResourceError, Labeled, ParentDevice, TrackingData}, + resource::{ + Buffer, DestroyedResourceError, Fallible, InvalidResourceError, Labeled, ParentDevice, + TrackingData, + }, resource_log, snatch::SnatchGuard, track::RenderBundleScope, @@ -578,15 +581,20 @@ impl RenderBundleEncoder { fn set_bind_group( state: &mut State, - bind_group_guard: &crate::lock::RwLockReadGuard>, + bind_group_guard: &crate::storage::Storage>, dynamic_offsets: &[u32], index: u32, num_dynamic_offsets: usize, - bind_group_id: id::Id, + bind_group_id: Option>, ) -> Result<(), RenderBundleErrorInner> { - let bind_group = bind_group_guard - .get_owned(bind_group_id) - .map_err(|_| RenderCommandError::InvalidBindGroupId(bind_group_id))?; + if bind_group_id.is_none() { + // 
TODO: do appropriate cleanup for null bind_group. + return Ok(()); + } + + let bind_group_id = bind_group_id.unwrap(); + + let bind_group = bind_group_guard.get(bind_group_id).get()?; bind_group.same_device(&state.device)?; @@ -623,15 +631,13 @@ fn set_bind_group( fn set_pipeline( state: &mut State, - pipeline_guard: &crate::lock::RwLockReadGuard>, + pipeline_guard: &crate::storage::Storage>, context: &RenderPassContext, is_depth_read_only: bool, is_stencil_read_only: bool, pipeline_id: id::Id, ) -> Result<(), RenderBundleErrorInner> { - let pipeline = pipeline_guard - .get_owned(pipeline_id) - .map_err(|_| RenderCommandError::InvalidPipelineId(pipeline_id))?; + let pipeline = pipeline_guard.get(pipeline_id).get()?; pipeline.same_device(&state.device)?; @@ -666,15 +672,13 @@ fn set_pipeline( fn set_index_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, index_format: wgt::IndexFormat, offset: u64, size: Option, ) -> Result<(), RenderBundleErrorInner> { - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -701,7 +705,7 @@ fn set_index_buffer( fn set_vertex_buffer( state: &mut State, - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, slot: u32, buffer_id: id::Id, offset: u64, @@ -716,9 +720,7 @@ fn set_vertex_buffer( .into()); } - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -845,7 +847,7 @@ fn draw_indexed( fn multi_draw_indirect( state: &mut State, dynamic_offsets: &[u32], - buffer_guard: &crate::lock::RwLockReadGuard>, + buffer_guard: &crate::storage::Storage>, buffer_id: id::Id, offset: u64, indexed: bool, @@ -857,9 +859,7 @@ fn multi_draw_indirect( let pipeline = 
state.pipeline()?; let used_bind_groups = pipeline.used_bind_groups; - let buffer = buffer_guard - .get_owned(buffer_id) - .map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))?; + let buffer = buffer_guard.get(buffer_id).get()?; state .trackers @@ -981,12 +981,17 @@ impl RenderBundle { num_dynamic_offsets, bind_group, } => { - let raw_bg = bind_group.try_raw(snatch_guard)?; + let mut bg = None; + if bind_group.is_some() { + let bind_group = bind_group.as_ref().unwrap(); + let raw_bg = bind_group.try_raw(snatch_guard)?; + bg = Some(raw_bg); + } unsafe { raw.set_bind_group( pipeline_layout.as_ref().unwrap().raw(), *index, - raw_bg, + bg, &offsets[..*num_dynamic_offsets], ) }; @@ -1501,7 +1506,7 @@ impl State { let offsets = &contents.dynamic_offsets; return Some(ArcRenderCommand::SetBindGroup { index: i.try_into().unwrap(), - bind_group: contents.bind_group.clone(), + bind_group: Some(contents.bind_group.clone()), num_dynamic_offsets: offsets.end - offsets.start, }); } @@ -1526,6 +1531,8 @@ pub(super) enum RenderBundleErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error(transparent)] Bind(#[from] BindError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderBundleErrorInner @@ -1581,7 +1588,7 @@ pub mod bundle_ffi { pub unsafe extern "C" fn wgpu_render_bundle_set_bind_group( bundle: &mut RenderBundleEncoder, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: *const DynamicOffset, offset_length: usize, ) { diff --git a/third_party/rust/wgpu-core/src/command/clear.rs b/third_party/rust/wgpu-core/src/command/clear.rs index 944dd40af4168..3a12a54117782 100644 --- a/third_party/rust/wgpu-core/src/command/clear.rs +++ b/third_party/rust/wgpu-core/src/command/clear.rs @@ -11,8 +11,8 @@ use crate::{ id::{BufferId, CommandEncoderId, TextureId}, init_tracker::{MemoryInitKind, TextureInitRange}, resource::{ - DestroyedResourceError, Labeled, MissingBufferUsageError, 
ParentDevice, ResourceErrorIdent, - Texture, TextureClearMode, + DestroyedResourceError, InvalidResourceError, Labeled, MissingBufferUsageError, + ParentDevice, ResourceErrorIdent, Texture, TextureClearMode, }, snatch::SnatchGuard, track::{TextureSelector, TextureTrackerSetSingle}, @@ -27,10 +27,6 @@ use wgt::{math::align_to, BufferAddress, BufferUsages, ImageSubresourceRange, Te pub enum ClearError { #[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")] MissingClearTextureFeature, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("{0} can not be cleared")] @@ -75,6 +71,8 @@ whereas subesource range specified start {subresource_base_array_layer} and coun Device(#[from] DeviceError), #[error(transparent)] CommandEncoderError(#[from] CommandEncoderError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -90,27 +88,18 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::ClearBuffer { dst, offset, size }); } - let dst_buffer = hub - .buffers - .get(dst) - .map_err(|_| ClearError::InvalidBufferId(dst))?; + let dst_buffer = hub.buffers.get(dst).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -163,7 +152,7 @@ impl Global { // actual hal barrier & operation let dst_barrier = 
dst_pending.map(|pending| pending.into_hal(&dst_buffer, &snatch_guard)); - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_buffers(dst_barrier.as_slice()); cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset); @@ -182,17 +171,11 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -206,10 +189,7 @@ impl Global { return Err(ClearError::MissingClearTextureFeature); } - let dst_texture = hub - .textures - .get(dst) - .map_err(|_| ClearError::InvalidTextureId(dst))?; + let dst_texture = hub.textures.get(dst).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -249,7 +229,7 @@ impl Global { let device = &cmd_buf.device; device.check_is_valid()?; - let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker()?; + let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker(&cmd_buf.device)?; let snatch_guard = device.snatchable_lock.read(); clear_texture( diff --git a/third_party/rust/wgpu-core/src/command/compute.rs b/third_party/rust/wgpu-core/src/command/compute.rs index eb929740c80ae..5de03917d21b3 100644 --- a/third_party/rust/wgpu-core/src/command/compute.rs +++ b/third_party/rust/wgpu-core/src/command/compute.rs @@ -17,8 +17,8 @@ use crate::{ init_tracker::{BufferInitTrackerAction, MemoryInitKind}, pipeline::ComputePipeline, resource::{ - self, Buffer, DestroyedResourceError, Labeled, MissingBufferUsageError, ParentDevice, - 
Trackable, + self, Buffer, DestroyedResourceError, InvalidResourceError, Labeled, + MissingBufferUsageError, ParentDevice, Trackable, }, snatch::SnatchGuard, track::{ResourceUsageCompatibilityError, Tracker, TrackerIndex, UsageScope}, @@ -132,14 +132,8 @@ pub enum ComputePassErrorInner { Encoder(#[from] CommandEncoderError), #[error("Parent encoder is invalid")] InvalidParentEncoder, - #[error("BindGroupId {0:?} is invalid")] - InvalidBindGroupId(id::BindGroupId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, - #[error("ComputePipelineId {0:?} is invalid")] - InvalidPipelineId(id::ComputePipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")] @@ -148,8 +142,6 @@ pub enum ComputePassErrorInner { end_offset: u64, buffer_size: u64, }, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError), #[error(transparent)] @@ -176,6 +168,8 @@ pub enum ComputePassErrorInner { MissingDownlevelFlags(#[from] MissingDownlevelFlags), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered when performing a compute pass. 
@@ -298,22 +292,21 @@ impl Global { let make_err = |e, arc_desc| (ComputePass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let Ok(query_set) = hub.query_sets.get(tw.query_set) else { - return make_err( - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set), - arc_desc, - ); + let query_set = match hub.query_sets.get(tw.query_set).get() { + Ok(query_set) => query_set, + Err(e) => return make_err(e.into(), arc_desc), }; Some(ArcPassTimestampWrites { @@ -328,25 +321,8 @@ impl Global { (ComputePass::new(Some(cmd_buf), arc_desc), None) } - pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { - let scope = PassErrorScope::Pass; - - let cmd_buf = pass - .parent - .as_ref() - .ok_or(ComputePassErrorInner::InvalidParentEncoder) - .map_pass_err(scope)?; - - cmd_buf.unlock_encoder().map_pass_err(scope)?; - - let base = pass - .base - .take() - .ok_or(ComputePassErrorInner::PassEnded) - .map_pass_err(scope)?; - self.compute_pass_end_impl(cmd_buf, base, pass.timestamp_writes.take()) - } - + /// Note that this differs from [`Self::compute_pass_end`], it will + /// create a new pass, replay the commands and end the pass. 
#[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn compute_pass_end_with_unresolved_commands( @@ -355,19 +331,16 @@ impl Global { base: BasePass, timestamp_writes: Option<&PassTimestampWrites>, ) -> Result<(), ComputePassError> { - let hub = &self.hub; - let scope = PassErrorScope::Pass; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(scope), - }; - cmd_buf.check_recording().map_pass_err(scope)?; + let pass_scope = PassErrorScope::Pass; #[cfg(feature = "trace")] { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunComputePass { base: BasePass { @@ -382,50 +355,61 @@ impl Global { } } - let commands = - super::ComputeCommand::resolve_compute_command_ids(&self.hub, &base.commands)?; - - let timestamp_writes = if let Some(tw) = timestamp_writes { - Some(ArcPassTimestampWrites { - query_set: hub - .query_sets - .get(tw.query_set) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(tw.query_set)) - .map_pass_err(scope)?, - beginning_of_pass_write_index: tw.beginning_of_pass_write_index, - end_of_pass_write_index: tw.end_of_pass_write_index, - }) - } else { - None + let BasePass { + label, + commands, + dynamic_offsets, + string_data, + push_constant_data, + } = base; + + let (mut compute_pass, encoder_error) = self.command_encoder_create_compute_pass( + encoder_id, + &ComputePassDescriptor { + label: label.as_deref().map(std::borrow::Cow::Borrowed), + timestamp_writes, + }, + ); + if let Some(err) = encoder_error { + return Err(ComputePassError { + scope: pass_scope, + inner: err.into(), + }); }; - 
self.compute_pass_end_impl( - &cmd_buf, - BasePass { - label: base.label, - commands, - dynamic_offsets: base.dynamic_offsets, - string_data: base.string_data, - push_constant_data: base.push_constant_data, - }, - timestamp_writes, - ) + compute_pass.base = Some(BasePass { + label, + commands: super::ComputeCommand::resolve_compute_command_ids(&self.hub, &commands)?, + dynamic_offsets, + string_data, + push_constant_data, + }); + + self.compute_pass_end(&mut compute_pass) } - fn compute_pass_end_impl( - &self, - cmd_buf: &CommandBuffer, - base: BasePass, - mut timestamp_writes: Option, - ) -> Result<(), ComputePassError> { + pub fn compute_pass_end(&self, pass: &mut ComputePass) -> Result<(), ComputePassError> { profiling::scope!("CommandEncoder::run_compute_pass"); let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(ComputePassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + + let base = pass + .base + .take() + .ok_or(ComputePassErrorInner::PassEnded) + .map_pass_err(pass_scope)?; + let device = &cmd_buf.device; device.check_is_valid().map_pass_err(pass_scope)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; @@ -433,10 +417,10 @@ impl Global { // We automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, // we need to make sure to close the previous one. 
- encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // will be reset to true if recording is done without errors *status = CommandEncoderStatus::Error; - let raw_encoder = encoder.open().map_pass_err(pass_scope)?; + let raw_encoder = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; let mut state = State { binder: Binder::new(), @@ -467,9 +451,9 @@ impl Global { state.tracker.textures.set_size(indices.textures.size()); let timestamp_writes: Option> = - if let Some(tw) = timestamp_writes.take() { + if let Some(tw) = pass.timestamp_writes.take() { tw.query_set - .same_device_as(cmd_buf) + .same_device_as(cmd_buf.as_ref()) .map_pass_err(pass_scope)?; let query_set = state.tracker.query_sets.insert_single(tw.query_set); @@ -503,7 +487,7 @@ impl Global { }; let hal_desc = hal::ComputePassDescriptor { - label: hal_label(base.label.as_deref(), self.instance.flags), + label: hal_label(base.label.as_deref(), device.instance_flags), timestamp_writes, }; @@ -617,12 +601,12 @@ impl Global { } = state; // Stop the current command buffer. - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // Create a new command buffer, which we will insert _before_ the body of the compute pass. // // Use that buffer to insert barriers and clear discarded images. - let transit = encoder.open().map_pass_err(pass_scope)?; + let transit = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), transit, @@ -637,7 +621,9 @@ impl Global { &snatch_guard, ); // Close the command buffer, and swap it with the previous. 
- encoder.close_and_swap().map_pass_err(pass_scope)?; + encoder + .close_and_swap(&cmd_buf.device) + .map_pass_err(pass_scope)?; Ok(()) } @@ -649,10 +635,8 @@ fn set_bind_group( dynamic_offsets: &[DynamicOffset], index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, ) -> Result<(), ComputePassErrorInner> { - bind_group.same_device_as(cmd_buf)?; - let max_bind_groups = state.device.limits.max_bind_groups; if index >= max_bind_groups { return Err(ComputePassErrorInner::BindGroupIndexOutOfRange { @@ -668,7 +652,16 @@ fn set_bind_group( ); state.dynamic_offset_count += num_dynamic_offsets; + if bind_group.is_none() { + // TODO: Handle bind_group None. + return Ok(()); + } + + let bind_group = bind_group.unwrap(); let bind_group = state.tracker.bind_groups.insert_single(bind_group); + + bind_group.same_device_as(cmd_buf)?; + bind_group.validate_dynamic_bindings(index, &state.temp_offsets)?; state @@ -700,7 +693,7 @@ fn set_bind_group( state.raw_encoder.set_bind_group( pipeline_layout, index + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -745,7 +738,7 @@ fn set_pipeline( state.raw_encoder.set_bind_group( pipeline.layout.raw(), start_index as u32 + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -952,7 +945,7 @@ impl Global { &self, pass: &mut ComputePass, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: &[DynamicOffset], ) -> Result<(), ComputePassError> { let scope = PassErrorScope::SetBindGroup; @@ -973,12 +966,18 @@ impl Global { return Ok(()); } - let hub = &self.hub; - let bind_group = hub - .bind_groups - .get(bind_group_id) - .map_err(|_| ComputePassErrorInner::InvalidBindGroupId(bind_group_id)) - .map_pass_err(scope)?; + let mut bind_group = None; + if bind_group_id.is_some() { + let bind_group_id = bind_group_id.unwrap(); + + let hub = &self.hub; + let bg = hub + .bind_groups + .get(bind_group_id) + .get() + .map_pass_err(scope)?; + bind_group = Some(bg); + } 
base.commands.push(ArcComputeCommand::SetBindGroup { index, @@ -1008,7 +1007,7 @@ impl Global { let pipeline = hub .compute_pipelines .get(pipeline_id) - .map_err(|_| ComputePassErrorInner::InvalidPipelineId(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcComputeCommand::SetPipeline(pipeline)); @@ -1079,11 +1078,7 @@ impl Global { let scope = PassErrorScope::Dispatch { indirect: true }; let base = pass.base_mut(scope)?; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| ComputePassErrorInner::InvalidBufferId(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::DispatchIndirect { buffer, offset }); @@ -1150,11 +1145,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands.push(ArcComputeCommand::WriteTimestamp { query_set, @@ -1174,11 +1165,7 @@ impl Global { let base = pass.base_mut(scope)?; let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; base.commands .push(ArcComputeCommand::BeginPipelineStatisticsQuery { diff --git a/third_party/rust/wgpu-core/src/command/compute_command.rs b/third_party/rust/wgpu-core/src/command/compute_command.rs index e16487b7ead7e..67c23d94528c6 100644 --- a/third_party/rust/wgpu-core/src/command/compute_command.rs +++ b/third_party/rust/wgpu-core/src/command/compute_command.rs @@ -13,7 +13,7 @@ pub enum ComputeCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group_id: id::BindGroupId, + bind_group_id: Option, }, 
SetPipeline(id::ComputePipelineId), @@ -74,7 +74,7 @@ impl ComputeCommand { hub: &crate::hub::Hub, commands: &[ComputeCommand], ) -> Result, super::ComputePassError> { - use super::{ComputePassError, ComputePassErrorInner, PassErrorScope}; + use super::{ComputePassError, PassErrorScope}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -89,23 +89,36 @@ impl ComputeCommand { index, num_dynamic_offsets, bind_group_id, - } => ArcComputeCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| { + } => { + if bind_group_id.is_none() { + return Ok(ArcComputeCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: None, + }); + } + + let bind_group_id = bind_group_id.unwrap(); + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::SetBindGroup, - inner: ComputePassErrorInner::InvalidBindGroupId(bind_group_id), + inner: e.into(), } - })?, - }, + })?; + ArcComputeCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: Some(bg), + } + } ComputeCommand::SetPipeline(pipeline_id) => ArcComputeCommand::SetPipeline( pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| ComputePassError { + .get(pipeline_id) + .get() + .map_err(|e| ComputePassError { scope: PassErrorScope::SetPipelineCompute, - inner: ComputePassErrorInner::InvalidPipelineId(pipeline_id), + inner: e.into(), })?, ), @@ -123,10 +136,10 @@ impl ComputeCommand { ComputeCommand::DispatchIndirect { buffer_id, offset } => { ArcComputeCommand::DispatchIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::Dispatch { indirect: true }, - inner: ComputePassErrorInner::InvalidBufferId(buffer_id), + inner: e.into(), } })?, offset, @@ -147,10 +160,10 @@ impl ComputeCommand { query_set_id, query_index, } => 
ArcComputeCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::WriteTimestamp, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, @@ -160,10 +173,10 @@ impl ComputeCommand { query_set_id, query_index, } => ArcComputeCommand::BeginPipelineStatisticsQuery { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { ComputePassError { scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: ComputePassErrorInner::InvalidQuerySet(query_set_id), + inner: e.into(), } })?, query_index, @@ -185,7 +198,7 @@ pub enum ArcComputeCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, }, SetPipeline(Arc), @@ -215,6 +228,7 @@ pub enum ArcComputeCommand { }, PushDebugGroup { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, @@ -222,6 +236,7 @@ pub enum ArcComputeCommand { PopDebugGroup, InsertDebugMarker { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, diff --git a/third_party/rust/wgpu-core/src/command/draw.rs b/third_party/rust/wgpu-core/src/command/draw.rs index e8578bba0586a..dd54b60f26a90 100644 --- a/third_party/rust/wgpu-core/src/command/draw.rs +++ b/third_party/rust/wgpu-core/src/command/draw.rs @@ -1,6 +1,5 @@ use crate::{ binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError}, - id, resource::{ DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ResourceErrorIdent, @@ -68,22 +67,12 @@ pub enum DrawError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum RenderCommandError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), - #[error("BindGroupId {0:?} is invalid")] - 
InvalidBindGroupId(id::BindGroupId), - #[error("Render bundle {0:?} is invalid")] - InvalidRenderBundle(id::RenderBundleId), #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] BindGroupIndexOutOfRange { index: u32, max: u32 }, #[error("Vertex buffer index {index} is greater than the device's requested `max_vertex_buffers` limit {max}")] VertexBufferIndexOutOfRange { index: u32, max: u32 }, #[error("Dynamic buffer offset {0} does not respect device's requested `{1}` limit {2}")] UnalignedBufferOffset(u64, &'static str, u32), - #[error("RenderPipelineId {0:?} is invalid")] - InvalidPipelineId(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), #[error("Render pipeline targets are incompatible with render pass")] IncompatiblePipelineTargets(#[from] crate::device::RenderPassCompatibilityError), #[error("{0} writes to depth, while the pass has read-only depth access")] diff --git a/third_party/rust/wgpu-core/src/command/mod.rs b/third_party/rust/wgpu-core/src/command/mod.rs index 313bf813a142d..42c80d3ca529b 100644 --- a/third_party/rust/wgpu-core/src/command/mod.rs +++ b/third_party/rust/wgpu-core/src/command/mod.rs @@ -31,7 +31,7 @@ use crate::lock::{rank, Mutex}; use crate::snatch::SnatchGuard; use crate::init_tracker::BufferInitTrackerAction; -use crate::resource::Labeled; +use crate::resource::{InvalidResourceError, Labeled}; use crate::track::{DeviceTracker, Tracker, UsageScope}; use crate::LabelHelpers; use crate::{api_log, global::Global, id, resource_log, Label}; @@ -82,8 +82,8 @@ pub(crate) enum CommandEncoderStatus { /// /// When a `CommandEncoder` is left in this state, we have also /// returned an error result from the function that encountered - /// the problem. Future attempts to use the encoder (that is, - /// calls to [`CommandBuffer::get_encoder`]) will also return + /// the problem. 
Future attempts to use the encoder (for example, + /// calls to [`CommandBufferMutable::check_recording`]) will also return /// errors. /// /// Calling [`Global::command_encoder_finish`] in this state @@ -172,10 +172,10 @@ impl CommandEncoder { /// [l]: CommandEncoder::list /// [`transition_buffers`]: hal::CommandEncoder::transition_buffers /// [`transition_textures`]: hal::CommandEncoder::transition_textures - fn close_and_swap(&mut self) -> Result<(), DeviceError> { + fn close_and_swap(&mut self, device: &Device) -> Result<(), DeviceError> { if self.is_open { self.is_open = false; - let new = unsafe { self.raw.end_encoding()? }; + let new = unsafe { self.raw.end_encoding() }.map_err(|e| device.handle_hal_error(e))?; self.list.insert(self.list.len() - 1, new); } @@ -192,10 +192,11 @@ impl CommandEncoder { /// On return, the underlying hal encoder is closed. /// /// [l]: CommandEncoder::list - fn close(&mut self) -> Result<(), DeviceError> { + fn close(&mut self, device: &Device) -> Result<(), DeviceError> { if self.is_open { self.is_open = false; - let cmd_buf = unsafe { self.raw.end_encoding()? }; + let cmd_buf = + unsafe { self.raw.end_encoding() }.map_err(|e| device.handle_hal_error(e))?; self.list.push(cmd_buf); } @@ -215,11 +216,15 @@ impl CommandEncoder { /// Begin recording a new command buffer, if we haven't already. /// /// The underlying hal encoder is put in the "recording" state. - pub(crate) fn open(&mut self) -> Result<&mut dyn hal::DynCommandEncoder, DeviceError> { + pub(crate) fn open( + &mut self, + device: &Device, + ) -> Result<&mut dyn hal::DynCommandEncoder, DeviceError> { if !self.is_open { self.is_open = true; let hal_label = self.hal_label.as_deref(); - unsafe { self.raw.begin_encoding(hal_label)? }; + unsafe { self.raw.begin_encoding(hal_label) } + .map_err(|e| device.handle_hal_error(e))?; } Ok(self.raw.as_mut()) @@ -229,9 +234,9 @@ impl CommandEncoder { /// its own label. 
/// /// The underlying hal encoder is put in the "recording" state. - fn open_pass(&mut self, hal_label: Option<&str>) -> Result<(), DeviceError> { + fn open_pass(&mut self, hal_label: Option<&str>, device: &Device) -> Result<(), DeviceError> { self.is_open = true; - unsafe { self.raw.begin_encoding(hal_label)? }; + unsafe { self.raw.begin_encoding(hal_label) }.map_err(|e| device.handle_hal_error(e))?; Ok(()) } @@ -276,12 +281,113 @@ pub struct CommandBufferMutable { impl CommandBufferMutable { pub(crate) fn open_encoder_and_tracker( &mut self, + device: &Device, ) -> Result<(&mut dyn hal::DynCommandEncoder, &mut Tracker), DeviceError> { - let encoder = self.encoder.open()?; + let encoder = self.encoder.open(device)?; let tracker = &mut self.trackers; Ok((encoder, tracker)) } + + fn lock_encoder_impl(&mut self, lock: bool) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if lock { + self.status = CommandEncoderStatus::Locked; + } + Ok(()) + } + CommandEncoderStatus::Locked => { + // Any operation on a locked encoder is required to put it into the invalid/error state. + // See https://www.w3.org/TR/webgpu/#encoder-state-locked + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. + fn check_recording(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(false) + } + + /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. + /// + /// Call [`CommandBufferMutable::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. 
+ fn lock_encoder(&mut self) -> Result<(), CommandEncoderError> { + self.lock_encoder_impl(true) + } + + /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. + /// + /// This function is the counterpart to [`CommandBufferMutable::lock_encoder`]. + /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. + fn unlock_encoder(&mut self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Locked => { + self.status = CommandEncoderStatus::Recording; + Ok(()) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), + CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), + } + } + + pub fn check_finished(&self) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Finished => Ok(()), + _ => Err(CommandEncoderError::Invalid), + } + } + + pub(crate) fn finish(&mut self, device: &Device) -> Result<(), CommandEncoderError> { + match self.status { + CommandEncoderStatus::Recording => { + if let Err(e) = self.encoder.close(device) { + Err(e.into()) + } else { + self.status = CommandEncoderStatus::Finished; + // Note: if we want to stop tracking the swapchain texture view, + // this is the place to do it. 
+ Ok(()) + } + } + CommandEncoderStatus::Locked => { + self.encoder.discard(); + self.status = CommandEncoderStatus::Error; + Err(CommandEncoderError::Locked) + } + CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), + CommandEncoderStatus::Error => { + self.encoder.discard(); + Err(CommandEncoderError::Invalid) + } + } + } + + pub(crate) fn into_baked_commands(self) -> BakedCommands { + BakedCommands { + encoder: self.encoder.raw, + list: self.encoder.list, + trackers: self.trackers, + buffer_memory_init_actions: self.buffer_memory_init_actions, + texture_memory_actions: self.texture_memory_actions, + } + } + + pub(crate) fn destroy(mut self, device: &Device) { + self.encoder.discard(); + unsafe { + self.encoder.raw.reset_all(self.encoder.list); + } + unsafe { + device.raw().destroy_command_encoder(self.encoder.raw); + } + } } /// A buffer of commands to be submitted to the GPU for execution. @@ -313,22 +419,15 @@ pub struct CommandBuffer { /// This `Option` is populated when the command buffer is first created. /// When this is submitted, dropped, or destroyed, its contents are /// extracted into a [`BakedCommands`] by - /// [`CommandBuffer::extract_baked_commands`]. + /// [`CommandBufferMutable::into_baked_commands`]. 
pub(crate) data: Mutex>, } impl Drop for CommandBuffer { fn drop(&mut self) { resource_log!("Drop {}", self.error_ident()); - if self.data.lock().is_none() { - return; - } - let mut baked = self.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.device.raw().destroy_command_encoder(baked.encoder); + if let Some(data) = self.data.lock().take() { + data.destroy(&self.device); } } } @@ -368,6 +467,15 @@ impl CommandBuffer { } } + pub(crate) fn new_invalid(device: &Arc, label: &Label) -> Self { + CommandBuffer { + device: device.clone(), + support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE), + label: label.to_string(), + data: Mutex::new(rank::COMMAND_BUFFER_DATA, None), + } + } + pub(crate) fn insert_barriers_from_tracker( raw: &mut dyn hal::DynCommandEncoder, base: &mut Tracker, @@ -446,80 +554,19 @@ impl CommandBuffer { } impl CommandBuffer { - fn lock_encoder_impl(&self, lock: bool) -> Result<(), CommandEncoderError> { - let mut cmd_buf_data_guard = self.data.lock(); - let cmd_buf_data = cmd_buf_data_guard.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if lock { - cmd_buf_data.status = CommandEncoderStatus::Locked; - } - Ok(()) - } - CommandEncoderStatus::Locked => { - // Any operation on a locked encoder is required to put it into the invalid/error state. 
- // See https://www.w3.org/TR/webgpu/#encoder-state-locked - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Err(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } + pub fn try_get<'a>( + &'a self, + ) -> Result, InvalidResourceError> { + let g = self.data.lock(); + crate::lock::MutexGuard::try_map(g, |data| data.as_mut()) + .map_err(|_| InvalidResourceError(self.error_ident())) } - /// Checks that the encoder is in the [`CommandEncoderStatus::Recording`] state. - fn check_recording(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(false) - } - - /// Locks the encoder by putting it in the [`CommandEncoderStatus::Locked`] state. - /// - /// Call [`CommandBuffer::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state. - fn lock_encoder(&self) -> Result<(), CommandEncoderError> { - self.lock_encoder_impl(true) - } - - /// Unlocks the [`CommandBuffer`] and puts it back into the [`CommandEncoderStatus::Recording`] state. - /// - /// This function is the counterpart to [`CommandBuffer::lock_encoder`]. - /// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state. 
- fn unlock_encoder(&self) -> Result<(), CommandEncoderError> { - let mut data_lock = self.data.lock(); - let status = &mut data_lock.as_mut().unwrap().status; - match *status { - CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Locked => { - *status = CommandEncoderStatus::Recording; - Ok(()) - } - CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid), - CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid), - } - } - - pub fn is_finished(&self) -> bool { - match self.data.lock().as_ref().unwrap().status { - CommandEncoderStatus::Finished => true, - _ => false, - } - } - - pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands { - let data = self.data.lock().take().unwrap(); - BakedCommands { - encoder: data.encoder.raw, - list: data.encoder.list, - trackers: data.trackers, - buffer_memory_init_actions: data.buffer_memory_init_actions, - texture_memory_actions: data.texture_memory_actions, - } - } - - pub(crate) fn from_arc_into_baked(self: Arc) -> BakedCommands { - let mut command_buffer = Arc::into_inner(self) - .expect("CommandBuffer cannot be destroyed because is still in use"); - command_buffer.extract_baked_commands() + pub fn try_take<'a>(&'a self) -> Result { + self.data + .lock() + .take() + .ok_or_else(|| InvalidResourceError(self.error_ident())) } } @@ -591,18 +638,10 @@ pub enum CommandEncoderError { #[error("Command encoder is locked by a previously created render/compute pass. 
Before recording any new commands, the pass must be ended.")] Locked, - #[error("QuerySet {0:?} for pass timestamp writes is invalid.")] - InvalidTimestampWritesQuerySetId(id::QuerySetId), - #[error("Attachment TextureViewId {0:?} is invalid")] - InvalidAttachmentId(id::TextureViewId), #[error(transparent)] InvalidColorAttachment(#[from] ColorAttachmentError), - #[error("Resolve attachment TextureViewId {0:?} is invalid")] - InvalidResolveTargetId(id::TextureViewId), - #[error("Depth stencil attachment TextureViewId {0:?} is invalid")] - InvalidDepthStencilAttachmentId(id::TextureViewId), - #[error("Occlusion QuerySetId {0:?} is invalid")] - InvalidOcclusionQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl Global { @@ -615,34 +654,15 @@ impl Global { let hub = &self.hub; - let error = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - match cmd_buf_data.status { - CommandEncoderStatus::Recording => { - if let Err(e) = cmd_buf_data.encoder.close() { - Some(e.into()) - } else { - cmd_buf_data.status = CommandEncoderStatus::Finished; - //Note: if we want to stop tracking the swapchain texture view, - // this is the place to do it. 
- None - } - } - CommandEncoderStatus::Locked => { - cmd_buf_data.encoder.discard(); - cmd_buf_data.status = CommandEncoderStatus::Error; - Some(CommandEncoderError::Locked) - } - CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording), - CommandEncoderStatus::Error => { - cmd_buf_data.encoder.discard(); - Some(CommandEncoderError::Invalid) - } - } - } - Err(_) => Some(CommandEncoderError::Invalid), + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + + let error = match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.finish(&cmd_buf.device)) + { + Ok(_) => None, + Err(e) => Some(e), }; (encoder_id.into_command_buffer_id(), error) @@ -658,23 +678,19 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::PushDebugGroup(label.to_string())); } - let cmd_buf_raw = cmd_buf_data.encoder.open()?; - if !self - .instance - .flags + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { @@ -694,26 +710,21 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let 
cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::InsertDebugMarker(label.to_string())); } - if !self - .instance - .flags + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.insert_debug_marker(label); } @@ -730,24 +741,19 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::PopDebugGroup); } - let cmd_buf_raw = cmd_buf_data.encoder.open()?; - if !self - .instance - .flags + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; + if !cmd_buf + .device + .instance_flags .contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) { unsafe { @@ -805,7 +811,7 @@ impl Default for StateChange { #[derive(Debug)] struct BindGroupStateChange { - last_states: [StateChange; hal::MAX_BIND_GROUPS], + last_states: [StateChange>; hal::MAX_BIND_GROUPS], } impl BindGroupStateChange { @@ -817,7 +823,7 @@ impl BindGroupStateChange { fn set_and_check_redundant( &mut self, - bind_group_id: id::BindGroupId, + bind_group_id: Option, index: u32, dynamic_offsets: &mut Vec, offsets: &[wgt::DynamicOffset], diff --git a/third_party/rust/wgpu-core/src/command/query.rs 
b/third_party/rust/wgpu-core/src/command/query.rs index de5103ac88af9..d783721fb4626 100644 --- a/third_party/rust/wgpu-core/src/command/query.rs +++ b/third_party/rust/wgpu-core/src/command/query.rs @@ -7,7 +7,8 @@ use crate::{ id, init_tracker::MemoryInitKind, resource::{ - DestroyedResourceError, MissingBufferUsageError, ParentDevice, QuerySet, Trackable, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, ParentDevice, + QuerySet, Trackable, }, track::{StatelessTracker, TrackerIndex}, FastHashMap, @@ -100,12 +101,10 @@ pub enum QueryError { Use(#[from] QueryUseError), #[error("Error encountered while trying to resolve a query")] Resolve(#[from] ResolveError), - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(id::BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), - #[error("QuerySetId {0:?} is invalid or destroyed")] - InvalidQuerySetId(id::QuerySetId), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } /// Error encountered while trying to use queries @@ -319,22 +318,16 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; cmd_buf .device .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::WriteTimestamp { @@ -343,20 +336,14 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - - let raw_encoder = encoder.open()?; + let 
raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?; + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } @@ -371,17 +358,11 @@ impl Global { ) -> Result<(), QueryError> { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { @@ -394,32 +375,20 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let raw_encoder = encoder.open()?; - if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 { return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment)); } - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| QueryError::InvalidQuerySetId(query_set_id))?; - - let query_set = tracker.query_sets.insert_single(query_set); + let query_set = hub.query_sets.get(query_set_id).get()?; query_set.same_device_as(cmd_buf.as_ref())?; - let dst_buffer = hub - .buffers - .get(destination) - .map_err(|_| QueryError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = 
tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, hal::BufferUses::COPY_DST); @@ -465,14 +434,16 @@ impl Global { } // TODO(https://github.com/gfx-rs/wgpu/issues/3993): Need to track initialization state. - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - buffer_start_offset..buffer_end_offset, - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + buffer_start_offset..buffer_end_offset, + MemoryInitKind::ImplicitlyInitialized, + ), + ); let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?; - + let raw_encoder = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { raw_encoder.transition_buffers(dst_barrier.as_slice()); raw_encoder.copy_query_results( @@ -484,6 +455,8 @@ impl Global { ); } + cmd_buf_data.trackers.query_sets.insert_single(query_set); + Ok(()) } } diff --git a/third_party/rust/wgpu-core/src/command/render.rs b/third_party/rust/wgpu-core/src/command/render.rs index e4d93b042ebb9..b6680333c2cb9 100644 --- a/third_party/rust/wgpu-core/src/command/render.rs +++ b/third_party/rust/wgpu-core/src/command/render.rs @@ -4,6 +4,7 @@ use crate::command::{ }; use crate::init_tracker::BufferInitTrackerAction; use crate::pipeline::RenderPipeline; +use crate::resource::InvalidResourceError; use crate::snatch::SnatchGuard; use crate::{ api_log, @@ -582,14 +583,6 @@ pub enum RenderPassErrorInner { InvalidParentEncoder, #[error("The format of the depth-stencil attachment ({0:?}) is not a depth-stencil format")] InvalidDepthStencilAttachmentFormat(wgt::TextureFormat), - #[error("Buffer {0:?} is invalid or destroyed")] - InvalidBuffer(id::BufferId), - #[error("Render pipeline {0:?} is invalid")] - InvalidPipeline(id::RenderPipelineId), - #[error("QuerySet {0:?} is invalid")] - InvalidQuerySet(id::QuerySetId), - #[error("Render bundle {0:?} is invalid")] - 
InvalidRenderBundle(id::RenderBundleId), #[error("The format of the {location} ({format:?}) is not resolvable")] UnsupportedResolveTargetFormat { location: AttachmentErrorLocation, @@ -635,8 +628,6 @@ pub enum RenderPassErrorInner { SurfaceTextureDropped, #[error("Not enough memory left for render pass")] OutOfMemory, - #[error("The bind group at index {0:?} is invalid")] - InvalidBindGroup(u32), #[error("Unable to clear non-present/read-only depth")] InvalidDepthOps, #[error("Unable to clear non-present/read-only stencil")] @@ -705,6 +696,8 @@ pub enum RenderPassErrorInner { DestroyedResource(#[from] DestroyedResourceError), #[error("The compute pass has already been ended and no further commands can be recorded")] PassEnded, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for RenderPassErrorInner { @@ -1362,14 +1355,10 @@ impl Global { channel, }) = color_attachment { - let view = texture_views - .get_owned(*view_id) - .map_err(|_| CommandEncoderError::InvalidAttachmentId(*view_id))?; + let view = texture_views.get(*view_id).get()?; let resolve_target = if let Some(resolve_target_id) = resolve_target { - let rt_arc = texture_views.get_owned(*resolve_target_id).map_err(|_| { - CommandEncoderError::InvalidResolveTargetId(*resolve_target_id) - })?; + let rt_arc = texture_views.get(*resolve_target_id).get()?; Some(rt_arc) } else { @@ -1390,13 +1379,7 @@ impl Global { arc_desc.depth_stencil_attachment = if let Some(depth_stencil_attachment) = desc.depth_stencil_attachment { - let view = texture_views - .get_owned(depth_stencil_attachment.view) - .map_err(|_| { - CommandEncoderError::InvalidDepthStencilAttachmentId( - depth_stencil_attachment.view, - ) - })?; + let view = texture_views.get(depth_stencil_attachment.view).get()?; Some(ArcRenderPassDepthStencilAttachment { view, @@ -1408,9 +1391,7 @@ impl Global { }; arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes { - let query_set = 
query_sets.get_owned(tw.query_set).map_err(|_| { - CommandEncoderError::InvalidTimestampWritesQuerySetId(tw.query_set) - })?; + let query_set = query_sets.get(tw.query_set).get()?; Some(ArcPassTimestampWrites { query_set, @@ -1423,9 +1404,7 @@ impl Global { arc_desc.occlusion_query_set = if let Some(occlusion_query_set) = desc.occlusion_query_set { - let query_set = query_sets.get_owned(occlusion_query_set).map_err(|_| { - CommandEncoderError::InvalidOcclusionQuerySetId(occlusion_query_set) - })?; + let query_set = query_sets.get(occlusion_query_set).get()?; Some(query_set) } else { @@ -1446,12 +1425,13 @@ impl Global { let make_err = |e, arc_desc| (RenderPass::new(None, arc_desc), Some(e)); - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return make_err(CommandEncoderError::Invalid, arc_desc), - }; + let cmd_buf = hub.command_buffers.get(encoder_id.into_command_buffer_id()); - match cmd_buf.lock_encoder() { + match cmd_buf + .try_get() + .map_err(|e| e.into()) + .and_then(|mut cmd_buf_data| cmd_buf_data.lock_encoder()) + { Ok(_) => {} Err(e) => return make_err(e, arc_desc), }; @@ -1461,6 +1441,8 @@ impl Global { (RenderPass::new(Some(cmd_buf), arc_desc), err) } + /// Note that this differs from [`Self::render_pass_end`], it will + /// create a new pass, replay the commands and end the pass. 
#[doc(hidden)] #[cfg(any(feature = "serde", feature = "replay"))] pub fn render_pass_end_with_unresolved_commands( @@ -1476,15 +1458,11 @@ impl Global { #[cfg(feature = "trace")] { - let hub = &self.hub; - - let cmd_buf = match hub.command_buffers.get(encoder_id.into_command_buffer_id()) { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid).map_pass_err(pass_scope)?, - }; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = self + .hub + .command_buffers + .get(encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get().map_pass_err(pass_scope)?; if let Some(ref mut list) = cmd_buf_data.commands { list.push(crate::device::trace::Command::RunRenderPass { @@ -1528,29 +1506,26 @@ impl Global { }); }; - let hub = &self.hub; render_pass.base = Some(BasePass { label, - commands: super::RenderCommand::resolve_render_command_ids(hub, &commands)?, + commands: super::RenderCommand::resolve_render_command_ids(&self.hub, &commands)?, dynamic_offsets, string_data, push_constant_data, }); - if let Some(err) = encoder_error { - Err(RenderPassError { - scope: pass_scope, - inner: err.into(), - }) - } else { - self.render_pass_end(&mut render_pass) - } + self.render_pass_end(&mut render_pass) } - #[doc(hidden)] pub fn render_pass_end(&self, pass: &mut RenderPass) -> Result<(), RenderPassError> { let pass_scope = PassErrorScope::Pass; + let cmd_buf = pass + .parent + .as_ref() + .ok_or(RenderPassErrorInner::InvalidParentEncoder) + .map_pass_err(pass_scope)?; + let base = pass .base .take() @@ -1562,20 +1537,16 @@ impl Global { base.label.as_deref().unwrap_or("") ); - let Some(cmd_buf) = pass.parent.as_ref() else { - return Err(RenderPassErrorInner::InvalidParentEncoder).map_pass_err(pass_scope); - }; - cmd_buf.unlock_encoder().map_pass_err(pass_scope)?; - - let hal_label = hal_label(base.label.as_deref(), self.instance.flags); + let mut cmd_buf_data = 
cmd_buf.try_get().map_pass_err(pass_scope)?; + cmd_buf_data.unlock_encoder().map_pass_err(pass_scope)?; + let cmd_buf_data = &mut *cmd_buf_data; let device = &cmd_buf.device; let snatch_guard = &device.snatchable_lock.read(); - let (scope, pending_discard_init_fixups) = { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let hal_label = hal_label(base.label.as_deref(), device.instance_flags); + let (scope, pending_discard_init_fixups) = { device.check_is_valid().map_pass_err(pass_scope)?; let encoder = &mut cmd_buf_data.encoder; @@ -1588,10 +1559,12 @@ impl Global { // We automatically keep extending command buffers over time, and because // we want to insert a command buffer _before_ what we're about to record, // we need to make sure to close the previous one. - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; // We will reset this to `Recording` if we succeed, acts as a fail-safe. 
*status = CommandEncoderStatus::Error; - encoder.open_pass(hal_label).map_pass_err(pass_scope)?; + encoder + .open_pass(hal_label, &cmd_buf.device) + .map_pass_err(pass_scope)?; let info = RenderPassInfo::start( device, @@ -1894,19 +1867,16 @@ impl Global { .finish(state.raw_encoder, state.snatch_guard) .map_pass_err(pass_scope)?; - encoder.close().map_pass_err(pass_scope)?; + encoder.close(&cmd_buf.device).map_pass_err(pass_scope)?; (trackers, pending_discard_init_fixups) }; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - let encoder = &mut cmd_buf_data.encoder; let status = &mut cmd_buf_data.status; let tracker = &mut cmd_buf_data.trackers; { - let transit = encoder.open().map_pass_err(pass_scope)?; + let transit = encoder.open(&cmd_buf.device).map_pass_err(pass_scope)?; fixup_discarded_surfaces( pending_discard_init_fixups.into_iter(), @@ -1922,7 +1892,9 @@ impl Global { } *status = CommandEncoderStatus::Recording; - encoder.close_and_swap().map_pass_err(pass_scope)?; + encoder + .close_and_swap(&cmd_buf.device) + .map_pass_err(pass_scope)?; Ok(()) } @@ -1934,12 +1906,16 @@ fn set_bind_group( dynamic_offsets: &[DynamicOffset], index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, ) -> Result<(), RenderPassErrorInner> { - api_log!( - "RenderPass::set_bind_group {index} {}", - bind_group.error_ident() - ); + if bind_group.is_none() { + api_log!("RenderPass::set_bind_group {index} None"); + } else { + api_log!( + "RenderPass::set_bind_group {index} {}", + bind_group.as_ref().unwrap().error_ident() + ); + } let max_bind_groups = state.device.limits.max_bind_groups; if index >= max_bind_groups { @@ -1957,6 +1933,12 @@ fn set_bind_group( ); state.dynamic_offset_count += num_dynamic_offsets; + if bind_group.is_none() { + // TODO: Handle bind_group None. 
+ return Ok(()); + } + + let bind_group = bind_group.unwrap(); let bind_group = state.tracker.bind_groups.insert_single(bind_group); bind_group.same_device_as(cmd_buf.as_ref())?; @@ -1999,7 +1981,7 @@ fn set_bind_group( state.raw_encoder.set_bind_group( pipeline_layout, index + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -2073,7 +2055,7 @@ fn set_pipeline( state.raw_encoder.set_bind_group( pipeline.layout.raw(), start_index as u32 + i as u32, - raw_bg, + Some(raw_bg), &e.dynamic_offsets, ); } @@ -2760,11 +2742,7 @@ impl Global { buffer_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; + let buffer = hub.buffers.get(buffer_id).get().map_pass_err(scope)?; Ok(buffer) } @@ -2775,11 +2753,7 @@ impl Global { query_set_id: id::Id, ) -> Result, RenderPassError> { let hub = &self.hub; - let query_set = hub - .query_sets - .get(query_set_id) - .map_err(|_| RenderPassErrorInner::InvalidQuerySet(query_set_id)) - .map_pass_err(scope)?; + let query_set = hub.query_sets.get(query_set_id).get().map_pass_err(scope)?; Ok(query_set) } @@ -2788,7 +2762,7 @@ impl Global { &self, pass: &mut RenderPass, index: u32, - bind_group_id: id::BindGroupId, + bind_group_id: Option, offsets: &[DynamicOffset], ) -> Result<(), RenderPassError> { let scope = PassErrorScope::SetBindGroup; @@ -2808,12 +2782,18 @@ impl Global { return Ok(()); } - let hub = &self.hub; - let bind_group = hub - .bind_groups - .get(bind_group_id) - .map_err(|_| RenderPassErrorInner::InvalidBindGroup(index)) - .map_pass_err(scope)?; + let mut bind_group = None; + if bind_group_id.is_some() { + let bind_group_id = bind_group_id.unwrap(); + + let hub = &self.hub; + let bg = hub + .bind_groups + .get(bind_group_id) + .get() + .map_pass_err(scope)?; + bind_group = Some(bg); + } base.commands.push(ArcRenderCommand::SetBindGroup { index, @@ -2843,7 +2823,7 @@ 
impl Global { let pipeline = hub .render_pipelines .get(pipeline_id) - .map_err(|_| RenderPassErrorInner::InvalidPipeline(pipeline_id)) + .get() .map_pass_err(scope)?; base.commands.push(ArcRenderCommand::SetPipeline(pipeline)); @@ -3154,23 +3134,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. - let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - let count_buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: false, @@ -3194,24 +3162,11 @@ impl Global { }; let base = pass.base_mut(scope)?; - // Don't use resolve_render_pass_buffer_id here, because we don't want to take the read-lock twice. 
- let hub = &self.hub; - let buffers = hub.buffers.read(); - let buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(buffer_id)) - .map_pass_err(scope)?; - - let count_buffer = buffers - .get_owned(buffer_id) - .map_err(|_| RenderPassErrorInner::InvalidBuffer(count_buffer_id)) - .map_pass_err(scope)?; - base.commands .push(ArcRenderCommand::MultiDrawIndirectCount { - buffer, + buffer: self.resolve_render_pass_buffer_id(scope, buffer_id)?, offset, - count_buffer, + count_buffer: self.resolve_render_pass_buffer_id(scope, count_buffer_id)?, count_buffer_offset, max_count, indexed: true, @@ -3355,10 +3310,7 @@ impl Global { let bundles = hub.render_bundles.read(); for &bundle_id in render_bundle_ids { - let bundle = bundles - .get_owned(bundle_id) - .map_err(|_| RenderPassErrorInner::InvalidRenderBundle(bundle_id)) - .map_pass_err(scope)?; + let bundle = bundles.get(bundle_id).get().map_pass_err(scope)?; base.commands.push(ArcRenderCommand::ExecuteBundle(bundle)); } diff --git a/third_party/rust/wgpu-core/src/command/render_command.rs b/third_party/rust/wgpu-core/src/command/render_command.rs index 891ee3cfbc8e3..d4e2689d27e78 100644 --- a/third_party/rust/wgpu-core/src/command/render_command.rs +++ b/third_party/rust/wgpu-core/src/command/render_command.rs @@ -17,7 +17,7 @@ pub enum RenderCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group_id: id::BindGroupId, + bind_group_id: Option, }, SetPipeline(id::RenderPipelineId), SetIndexBuffer { @@ -129,9 +129,7 @@ impl RenderCommand { hub: &crate::hub::Hub, commands: &[RenderCommand], ) -> Result, super::RenderPassError> { - use super::{ - DrawKind, PassErrorScope, RenderCommandError, RenderPassError, RenderPassErrorInner, - }; + use super::{DrawKind, PassErrorScope, RenderPassError}; let buffers_guard = hub.buffers.read(); let bind_group_guard = hub.bind_groups.read(); @@ -139,240 +137,253 @@ impl RenderCommand { let pipelines_guard = 
hub.render_pipelines.read(); let render_bundles_guard = hub.render_bundles.read(); - let resolved_commands: Vec = commands - .iter() - .map(|c| -> Result { - Ok(match *c { - RenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group_id, - } => ArcRenderCommand::SetBindGroup { - index, - num_dynamic_offsets, - bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetBindGroup, - inner: RenderPassErrorInner::InvalidBindGroup(index), + let resolved_commands: Vec = + commands + .iter() + .map(|c| -> Result { + Ok(match *c { + RenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group_id, + } => { + if bind_group_id.is_none() { + return Ok(ArcRenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: None, + }); } - })?, - }, - - RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( - pipelines_guard - .get_owned(pipeline_id) - .map_err(|_| RenderPassError { - scope: PassErrorScope::SetPipelineRender, - inner: RenderCommandError::InvalidPipelineId(pipeline_id).into(), - })?, - ), - - RenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - } => ArcRenderCommand::SetPushConstant { - offset, - size_bytes, - values_offset, - stages, - }, - - RenderCommand::PushDebugGroup { color, len } => { - ArcRenderCommand::PushDebugGroup { color, len } - } - - RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, - - RenderCommand::InsertDebugMarker { color, len } => { - ArcRenderCommand::InsertDebugMarker { color, len } - } - - RenderCommand::WriteTimestamp { - query_set_id, - query_index, - } => ArcRenderCommand::WriteTimestamp { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::WriteTimestamp, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::BeginPipelineStatisticsQuery { - query_set_id, - 
query_index, - } => ArcRenderCommand::BeginPipelineStatisticsQuery { - query_set: query_set_guard.get_owned(query_set_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::BeginPipelineStatisticsQuery, - inner: RenderPassErrorInner::InvalidQuerySet(query_set_id), - } - })?, - query_index, - }, - - RenderCommand::EndPipelineStatisticsQuery => { - ArcRenderCommand::EndPipelineStatisticsQuery - } - - RenderCommand::SetIndexBuffer { - buffer_id, - index_format, - offset, - size, - } => ArcRenderCommand::SetIndexBuffer { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetIndexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - index_format, - offset, - size, - }, - - RenderCommand::SetVertexBuffer { - slot, - buffer_id, - offset, - size, - } => ArcRenderCommand::SetVertexBuffer { - slot, - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::SetVertexBuffer, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), - } - })?, - offset, - size, - }, - - RenderCommand::SetBlendConstant(color) => { - ArcRenderCommand::SetBlendConstant(color) - } - - RenderCommand::SetStencilReference(reference) => { - ArcRenderCommand::SetStencilReference(reference) - } - - RenderCommand::SetViewport { - rect, - depth_min, - depth_max, - } => ArcRenderCommand::SetViewport { - rect, - depth_min, - depth_max, - }, - - RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), - - RenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - } => ArcRenderCommand::Draw { - vertex_count, - instance_count, - first_vertex, - first_instance, - }, - - RenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - } => ArcRenderCommand::DrawIndexed { - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - }, - - 
RenderCommand::MultiDrawIndirect { - buffer_id, - offset, - count, - indexed, - } => ArcRenderCommand::MultiDrawIndirect { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { - RenderPassError { - scope: PassErrorScope::Draw { - kind: if count.is_some() { - DrawKind::MultiDrawIndirect - } else { - DrawKind::DrawIndirect - }, - indexed, - }, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), + + let bind_group_id = bind_group_id.unwrap(); + let bg = bind_group_guard.get(bind_group_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetBindGroup, + inner: e.into(), + } + })?; + + ArcRenderCommand::SetBindGroup { + index, + num_dynamic_offsets, + bind_group: Some(bg), } - })?, - offset, - count, - indexed, - }, - - RenderCommand::MultiDrawIndirectCount { - buffer_id, - offset, - count_buffer_id, - count_buffer_offset, - max_count, - indexed, - } => { - let scope = PassErrorScope::Draw { - kind: DrawKind::MultiDrawIndirectCount, + } + + RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline( + pipelines_guard.get(pipeline_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetPipelineRender, + inner: e.into(), + } + })?, + ), + + RenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + } => ArcRenderCommand::SetPushConstant { + offset, + size_bytes, + values_offset, + stages, + }, + + RenderCommand::PushDebugGroup { color, len } => { + ArcRenderCommand::PushDebugGroup { color, len } + } + + RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup, + + RenderCommand::InsertDebugMarker { color, len } => { + ArcRenderCommand::InsertDebugMarker { color, len } + } + + RenderCommand::WriteTimestamp { + query_set_id, + query_index, + } => ArcRenderCommand::WriteTimestamp { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::WriteTimestamp, + inner: e.into(), + } + })?, + query_index, + }, + + 
RenderCommand::BeginPipelineStatisticsQuery { + query_set_id, + query_index, + } => ArcRenderCommand::BeginPipelineStatisticsQuery { + query_set: query_set_guard.get(query_set_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::BeginPipelineStatisticsQuery, + inner: e.into(), + } + })?, + query_index, + }, + + RenderCommand::EndPipelineStatisticsQuery => { + ArcRenderCommand::EndPipelineStatisticsQuery + } + + RenderCommand::SetIndexBuffer { + buffer_id, + index_format, + offset, + size, + } => ArcRenderCommand::SetIndexBuffer { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetIndexBuffer, + inner: e.into(), + } + })?, + index_format, + offset, + size, + }, + + RenderCommand::SetVertexBuffer { + slot, + buffer_id, + offset, + size, + } => ArcRenderCommand::SetVertexBuffer { + slot, + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::SetVertexBuffer, + inner: e.into(), + } + })?, + offset, + size, + }, + + RenderCommand::SetBlendConstant(color) => { + ArcRenderCommand::SetBlendConstant(color) + } + + RenderCommand::SetStencilReference(reference) => { + ArcRenderCommand::SetStencilReference(reference) + } + + RenderCommand::SetViewport { + rect, + depth_min, + depth_max, + } => ArcRenderCommand::SetViewport { + rect, + depth_min, + depth_max, + }, + + RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor), + + RenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + } => ArcRenderCommand::Draw { + vertex_count, + instance_count, + first_vertex, + first_instance, + }, + + RenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + } => ArcRenderCommand::DrawIndexed { + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + }, + + RenderCommand::MultiDrawIndirect { + buffer_id, + offset, + count, indexed, - }; - 
ArcRenderCommand::MultiDrawIndirectCount { - buffer: buffers_guard.get_owned(buffer_id).map_err(|_| { + } => ArcRenderCommand::MultiDrawIndirect { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(buffer_id).into(), + scope: PassErrorScope::Draw { + kind: if count.is_some() { + DrawKind::MultiDrawIndirect + } else { + DrawKind::DrawIndirect + }, + indexed, + }, + inner: e.into(), } })?, offset, - count_buffer: buffers_guard.get_owned(count_buffer_id).map_err( - |_| RenderPassError { - scope, - inner: RenderCommandError::InvalidBufferId(count_buffer_id) - .into(), - }, - )?, + count, + indexed, + }, + + RenderCommand::MultiDrawIndirectCount { + buffer_id, + offset, + count_buffer_id, count_buffer_offset, max_count, indexed, + } => { + let scope = PassErrorScope::Draw { + kind: DrawKind::MultiDrawIndirectCount, + indexed, + }; + ArcRenderCommand::MultiDrawIndirectCount { + buffer: buffers_guard.get(buffer_id).get().map_err(|e| { + RenderPassError { + scope, + inner: e.into(), + } + })?, + offset, + count_buffer: buffers_guard.get(count_buffer_id).get().map_err( + |e| RenderPassError { + scope, + inner: e.into(), + }, + )?, + count_buffer_offset, + max_count, + indexed, + } } - } - RenderCommand::BeginOcclusionQuery { query_index } => { - ArcRenderCommand::BeginOcclusionQuery { query_index } - } + RenderCommand::BeginOcclusionQuery { query_index } => { + ArcRenderCommand::BeginOcclusionQuery { query_index } + } - RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, + RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery, - RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( - render_bundles_guard - .get_owned(bundle) - .map_err(|_| RenderPassError { - scope: PassErrorScope::ExecuteBundle, - inner: RenderCommandError::InvalidRenderBundle(bundle).into(), + RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle( + 
render_bundles_guard.get(bundle).get().map_err(|e| { + RenderPassError { + scope: PassErrorScope::ExecuteBundle, + inner: e.into(), + } })?, - ), + ), + }) }) - }) - .collect::, RenderPassError>>()?; + .collect::, RenderPassError>>()?; Ok(resolved_commands) } } @@ -384,7 +395,7 @@ pub enum ArcRenderCommand { SetBindGroup { index: u32, num_dynamic_offsets: usize, - bind_group: Arc, + bind_group: Option>, }, SetPipeline(Arc), SetIndexBuffer { @@ -464,11 +475,13 @@ pub enum ArcRenderCommand { indexed: bool, }, PushDebugGroup { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, PopDebugGroup, InsertDebugMarker { + #[cfg_attr(target_os = "emscripten", allow(dead_code))] color: u32, len: usize, }, diff --git a/third_party/rust/wgpu-core/src/command/transfer.rs b/third_party/rust/wgpu-core/src/command/transfer.rs index de5ef9ed8486a..72eae50c255e4 100644 --- a/third_party/rust/wgpu-core/src/command/transfer.rs +++ b/third_party/rust/wgpu-core/src/command/transfer.rs @@ -12,11 +12,11 @@ use crate::{ TextureInitTrackerAction, }, resource::{ - DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ParentDevice, - Texture, TextureErrorDimension, + DestroyedResourceError, InvalidResourceError, MissingBufferUsageError, + MissingTextureUsageError, ParentDevice, Texture, TextureErrorDimension, }, snatch::SnatchGuard, - track::{TextureSelector, Tracker}, + track::TextureSelector, }; use arrayvec::ArrayVec; @@ -25,7 +25,7 @@ use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages}; use std::sync::Arc; -use super::{memory_init::CommandBufferTextureMemoryActions, ClearError, CommandEncoder}; +use super::{ClearError, CommandBufferMutable}; pub type ImageCopyBuffer = wgt::ImageCopyBuffer; pub type ImageCopyTexture = wgt::ImageCopyTexture; @@ -41,10 +41,6 @@ pub enum CopySide { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum TransferError { - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), - 
#[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error("Source and destination cannot be the same buffer")] SameSourceDestinationBuffer, #[error(transparent)] @@ -150,6 +146,8 @@ pub enum CopyError { Transfer(#[from] TransferError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } impl From for CopyError { @@ -408,9 +406,7 @@ pub(crate) fn validate_texture_copy_range( fn handle_texture_init( init_kind: MemoryInitKind, - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, copy_texture: &ImageCopyTexture, copy_size: &Extent3d, @@ -428,11 +424,13 @@ fn handle_texture_init( }; // Register the init action. - let immediate_inits = texture_memory_actions.register_init_action(&{ init_action }); + let immediate_inits = cmd_buf_data + .texture_memory_actions + .register_init_action(&{ init_action }); // In rare cases we may need to insert an init operation immediately onto the command buffer. if !immediate_inits.is_empty() { - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(device)?; for init in immediate_inits { clear_texture( &init.texture, @@ -441,7 +439,7 @@ fn handle_texture_init( layer_range: init.layer..(init.layer + 1), }, cmd_buf_raw, - &mut trackers.textures, + &mut cmd_buf_data.trackers.textures, &device.alignments, device.zero_buffer.as_ref(), snatch_guard, @@ -457,9 +455,7 @@ fn handle_texture_init( /// Ensure the source texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. 
fn handle_src_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, source: &ImageCopyTexture, copy_size: &Extent3d, @@ -468,9 +464,7 @@ fn handle_src_texture_init( ) -> Result<(), TransferError> { handle_texture_init( MemoryInitKind::NeedsInitializedMemory, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, source, copy_size, @@ -485,9 +479,7 @@ fn handle_src_texture_init( /// Ensure the destination texture of a transfer is in the right initialization /// state, and record the state for after the transfer operation. fn handle_dst_texture_init( - encoder: &mut CommandEncoder, - trackers: &mut Tracker, - texture_memory_actions: &mut CommandBufferTextureMemoryActions, + cmd_buf_data: &mut CommandBufferMutable, device: &Device, destination: &ImageCopyTexture, copy_size: &Extent3d, @@ -510,9 +502,7 @@ fn handle_dst_texture_init( handle_texture_init( dst_init_kind, - encoder, - trackers, - texture_memory_actions, + cmd_buf_data, device, destination, copy_size, @@ -542,17 +532,11 @@ impl Global { } let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; - - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; @@ -570,10 +554,7 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - let src_buffer = hub - .buffers - .get(source) - .map_err(|_| TransferError::InvalidBufferId(source))?; + let src_buffer = hub.buffers.get(source).get()?; 
src_buffer.same_device_as(cmd_buf.as_ref())?; @@ -589,10 +570,7 @@ impl Global { // expecting only a single barrier let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_buffer = hub - .buffers - .get(destination) - .map_err(|_| TransferError::InvalidBufferId(destination))?; + let dst_buffer = hub.buffers.get(destination).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; @@ -684,7 +662,7 @@ impl Global { dst_offset: destination_offset, size: wgt::BufferSize::new(size).unwrap(), }; - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; let barriers = src_barrier .into_iter() .chain(dst_barrier) @@ -712,21 +690,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyBufferToTexture { @@ -736,20 +708,12 @@ impl Global { }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; - if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_buffer_to_texture of size 0"); return Ok(()); } - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| 
TransferError::InvalidTextureId(destination.texture))?; + let dst_texture = hub.textures.get(destination.texture).get()?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -768,9 +732,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, @@ -778,14 +740,12 @@ impl Global { &snatch_guard, )?; - let src_buffer = hub - .buffers - .get(source.buffer) - .map_err(|_| TransferError::InvalidBufferId(source.buffer))?; + let src_buffer = hub.buffers.get(source.buffer).get()?; src_buffer.same_device_as(cmd_buf.as_ref())?; - let src_pending = tracker + let src_pending = cmd_buf_data + .trackers .buffers .set_single(&src_buffer, hal::BufferUses::COPY_SRC); @@ -795,10 +755,11 @@ impl Global { .map_err(TransferError::MissingBufferUsage)?; let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard)); - let dst_pending = - tracker - .textures - .set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST); + let dst_pending = cmd_buf_data.trackers.textures.set_single( + &dst_texture, + dst_range, + hal::TextureUses::COPY_DST, + ); let dst_raw = dst_texture.try_raw(&snatch_guard)?; dst_texture .check_usage(TextureUsages::COPY_DST) @@ -835,11 +796,13 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(src_buffer.initialization_status.read().create_action( - &src_buffer, - source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::NeedsInitializedMemory, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + src_buffer.initialization_status.read().create_action( + &src_buffer, + source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::NeedsInitializedMemory, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -855,7 +818,7 
@@ impl Global { }) .collect::>(); - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_textures(&dst_barrier); cmd_buf_raw.transition_buffers(src_barrier.as_slice()); @@ -880,21 +843,15 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub .command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToBuffer { @@ -903,20 +860,13 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let buffer_memory_init_actions = &mut cmd_buf_data.buffer_memory_init_actions; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_buffer of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; @@ -931,9 +881,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -941,10 +889,11 @@ impl Global { &snatch_guard, )?; - let src_pending = - tracker - .textures - .set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC); + let src_pending = cmd_buf_data.trackers.textures.set_single( + &src_texture, + src_range, + hal::TextureUses::COPY_SRC, + ); let src_raw = src_texture.try_raw(&snatch_guard)?; src_texture .check_usage(TextureUsages::COPY_SRC) @@ -966,14 +915,12 @@ impl Global { .map(|pending| pending.into_hal(src_raw)) .collect::>(); - let dst_buffer = hub - .buffers - .get(destination.buffer) - .map_err(|_| TransferError::InvalidBufferId(destination.buffer))?; + let dst_buffer = hub.buffers.get(destination.buffer).get()?; dst_buffer.same_device_as(cmd_buf.as_ref())?; - let dst_pending = tracker + let dst_pending = cmd_buf_data + .trackers .buffers .set_single(&dst_buffer, hal::BufferUses::COPY_DST); @@ -1011,11 +958,14 @@ impl Global { .map_err(TransferError::from)?; } - buffer_memory_init_actions.extend(dst_buffer.initialization_status.read().create_action( - &dst_buffer, - destination.layout.offset..(destination.layout.offset + required_buffer_bytes_in_copy), - MemoryInitKind::ImplicitlyInitialized, - )); + cmd_buf_data.buffer_memory_init_actions.extend( + dst_buffer.initialization_status.read().create_action( + &dst_buffer, + destination.layout.offset + ..(destination.layout.offset + required_buffer_bytes_in_copy), + MemoryInitKind::ImplicitlyInitialized, + ), + ); let regions = (0..array_layer_count) .map(|rel_array_layer| { @@ -1030,7 +980,7 @@ impl Global { } }) .collect::>(); - let cmd_buf_raw = encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_buffers(dst_barrier.as_slice()); cmd_buf_raw.transition_textures(&src_barrier); @@ -1060,23 +1010,17 @@ impl Global { let hub = &self.hub; - let cmd_buf = match hub + let cmd_buf = hub 
.command_buffers - .get(command_encoder_id.into_command_buffer_id()) - { - Ok(cmd_buf) => cmd_buf, - Err(_) => return Err(CommandEncoderError::Invalid.into()), - }; - cmd_buf.check_recording()?; + .get(command_encoder_id.into_command_buffer_id()); + let mut cmd_buf_data = cmd_buf.try_get()?; + cmd_buf_data.check_recording()?; let device = &cmd_buf.device; device.check_is_valid()?; let snatch_guard = device.snatchable_lock.read(); - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); - #[cfg(feature = "trace")] if let Some(ref mut list) = cmd_buf_data.commands { list.push(TraceCommand::CopyTextureToTexture { @@ -1085,23 +1029,14 @@ impl Global { size: *copy_size, }); } - let encoder = &mut cmd_buf_data.encoder; - let tracker = &mut cmd_buf_data.trackers; - let texture_memory_actions = &mut cmd_buf_data.texture_memory_actions; if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 { log::trace!("Ignoring copy_texture_to_texture of size 0"); return Ok(()); } - let src_texture = hub - .textures - .get(source.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; - let dst_texture = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(source.texture))?; + let src_texture = hub.textures.get(source.texture).get()?; + let dst_texture = hub.textures.get(destination.texture).get()?; src_texture.same_device_as(cmd_buf.as_ref())?; dst_texture.same_device_as(cmd_buf.as_ref())?; @@ -1143,9 +1078,7 @@ impl Global { // have an easier time inserting "immediate-inits" that may be required // by prior discards in rare cases. 
handle_src_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, source, copy_size, @@ -1153,9 +1086,7 @@ impl Global { &snatch_guard, )?; handle_dst_texture_init( - encoder, - tracker, - texture_memory_actions, + &mut cmd_buf_data, device, destination, copy_size, @@ -1209,7 +1140,7 @@ impl Global { } }) .collect::>(); - let cmd_buf_raw = cmd_buf_data.encoder.open()?; + let cmd_buf_raw = cmd_buf_data.encoder.open(&cmd_buf.device)?; unsafe { cmd_buf_raw.transition_textures(&barriers); cmd_buf_raw.copy_texture_to_texture( diff --git a/third_party/rust/wgpu-core/src/device/global.rs b/third_party/rust/wgpu-core/src/device/global.rs index d9f983d1a8166..4b7143e556ced 100644 --- a/third_party/rust/wgpu-core/src/device/global.rs +++ b/third_party/rust/wgpu-core/src/device/global.rs @@ -6,7 +6,8 @@ use crate::{ self, BindGroupEntry, BindingResource, BufferBinding, ResolvedBindGroupDescriptor, ResolvedBindGroupEntry, ResolvedBindingResource, ResolvedBufferBinding, }, - command, conv, + command::{self, CommandBuffer}, + conv, device::{bgl, life::WaitIdleError, DeviceError, DeviceLostClosure, DeviceLostReason}, global::Global, hal_api::HalApi, @@ -19,14 +20,19 @@ use crate::{ present, resource::{ self, BufferAccessError, BufferAccessResult, BufferMapOperation, CreateBufferError, + Fallible, }, storage::Storage, - Label, + Label, LabelHelpers, }; use wgt::{BufferAddress, TextureFormat}; -use std::{borrow::Cow, ptr::NonNull, sync::atomic::Ordering}; +use std::{ + borrow::Cow, + ptr::NonNull, + sync::{atomic::Ordering, Arc}, +}; use super::{ImplicitPipelineIds, UserClosures}; @@ -35,18 +41,10 @@ impl Global { &self, adapter_id: AdapterId, surface_id: SurfaceId, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?; - let surface = surface_guard - 
.get(surface_id) - .map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?; - Ok(adapter.is_surface_supported(surface)) + ) -> bool { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + adapter.is_surface_supported(&surface) } pub fn surface_get_capabilities( @@ -71,63 +69,30 @@ impl Global { }) } - fn fetch_adapter_and_surface< - F: FnOnce(&Adapter, &Surface) -> Result, - B, - >( + fn fetch_adapter_and_surface B, B>( &self, surface_id: SurfaceId, adapter_id: AdapterId, get_supported_callback: F, - ) -> Result { - let hub = &self.hub; - - let surface_guard = self.surfaces.read(); - let adapter_guard = hub.adapters.read(); - let adapter = adapter_guard - .get(adapter_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?; - let surface = surface_guard - .get(surface_id) - .map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?; - - get_supported_callback(adapter, surface) + ) -> B { + let surface = self.surfaces.get(surface_id); + let adapter = self.hub.adapters.get(adapter_id); + get_supported_callback(&adapter, &surface) } - pub fn device_features(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.features) + pub fn device_features(&self, device_id: DeviceId) -> wgt::Features { + let device = self.hub.devices.get(device_id); + device.features } - pub fn device_limits(&self, device_id: DeviceId) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; - - Ok(device.limits.clone()) + pub fn device_limits(&self, device_id: DeviceId) -> wgt::Limits { + let device = self.hub.devices.get(device_id); + device.limits.clone() } - pub fn device_downlevel_properties( - &self, - device_id: DeviceId, - ) -> Result { - let hub = &self.hub; - - let device = hub - .devices - .get(device_id) - .map_err(|_| 
DeviceError::InvalidDeviceId)?; - - Ok(device.downlevel.clone()) + pub fn device_downlevel_properties(&self, device_id: DeviceId) -> wgt::DownlevelCapabilities { + let device = self.hub.devices.get(device_id); + device.downlevel.clone() } pub fn device_create_buffer( @@ -142,12 +107,7 @@ impl Global { let fid = hub.buffers.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => { - break 'error DeviceError::InvalidDeviceId.into(); - } - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -166,7 +126,7 @@ impl Global { } }; - let id = fid.assign(buffer); + let id = fid.assign(Fallible::Valid(buffer)); api_log!( "Device::create_buffer({:?}{}) -> {id:?}", @@ -181,7 +141,7 @@ impl Global { return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -213,32 +173,37 @@ impl Global { /// [`device_create_buffer`]: Global::device_create_buffer /// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage /// [`wgpu_types::BufferUsages`]: wgt::BufferUsages - pub fn create_buffer_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.buffers.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_buffer_error( + &self, + backend: wgt::Backend, + id_in: Option, + desc: &resource::BufferDescriptor, + ) { + let fid = self.hub.buffers.prepare(backend, id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } pub fn create_render_bundle_error( &self, backend: wgt::Backend, id_in: Option, + desc: &command::RenderBundleDescriptor, ) { - let hub = &self.hub; - let fid = hub.render_bundles.prepare(backend, id_in); - - fid.assign_error(); + let fid = self.hub.render_bundles.prepare(backend, id_in); + 
fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } /// Assign `id_in` an error with the given `label`. /// /// See `create_buffer_error` for more context and explanation. - pub fn create_texture_error(&self, backend: wgt::Backend, id_in: Option) { - let hub = &self.hub; - let fid = hub.textures.prepare(backend, id_in); - - fid.assign_error(); + pub fn create_texture_error( + &self, + backend: wgt::Backend, + id_in: Option, + desc: &resource::TextureDescriptor, + ) { + let fid = self.hub.textures.prepare(backend, id_in); + fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); } #[cfg(feature = "replay")] @@ -250,10 +215,7 @@ impl Global { ) -> BufferAccessResult { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; let device = &buffer.device; @@ -270,21 +232,27 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); let raw_buf = buffer.try_raw(&snatch_guard)?; - unsafe { - let mapping = device + + let mapping = unsafe { + device .raw() .map_buffer(raw_buf, offset..offset + data.len() as u64) - .map_err(DeviceError::from)?; - std::ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()); - if !mapping.is_coherent { - #[allow(clippy::single_range_in_vec_init)] + } + .map_err(|e| device.handle_hal_error(e))?; + + unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len()) }; + + if !mapping.is_coherent { + #[allow(clippy::single_range_in_vec_init)] + unsafe { device .raw() - .flush_mapped_ranges(raw_buf, &[offset..offset + data.len() as u64]); - } - device.raw().unmap_buffer(raw_buf); + .flush_mapped_ranges(raw_buf, &[offset..offset + data.len() as u64]) + }; } + unsafe { device.raw().unmap_buffer(raw_buf) }; + Ok(()) } @@ -294,10 +262,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| 
resource::DestroyError::Invalid)?; + let buffer = hub.buffers.get(buffer_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = buffer.device.trace.lock().as_mut() { @@ -318,9 +283,9 @@ impl Global { let hub = &self.hub; - let buffer = match hub.buffers.unregister(buffer_id) { - Some(buffer) => buffer, - None => { + let buffer = match hub.buffers.remove(buffer_id).get() { + Ok(buffer) => buffer, + Err(_) => { return; } }; @@ -349,10 +314,7 @@ impl Global { let fid = hub.textures.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -364,7 +326,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -372,7 +334,7 @@ impl Global { log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -395,10 +357,7 @@ impl Global { let fid = hub.textures.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); // NB: Any change done through the raw texture handle will not be // recorded in the replay @@ -412,7 +371,7 @@ impl Global { Err(error) => break 'error error, }; - let id = fid.assign(texture); + let id = fid.assign(Fallible::Valid(texture)); api_log!("Device::create_texture({desc:?}) -> {id:?}"); return (id, None); @@ -420,7 +379,7 @@ impl Global { log::error!("Device::create_texture error: {error}"); - let id = fid.assign_error(); 
+ let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -441,31 +400,21 @@ impl Global { let hub = &self.hub; let fid = hub.buffers.prepare(A::VARIANT, id_in); - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; - - // NB: Any change done through the raw buffer handle will not be - // recorded in the replay - #[cfg(feature = "trace")] - if let Some(trace) = device.trace.lock().as_mut() { - trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); - } - - let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); + let device = self.hub.devices.get(device_id); - let id = fid.assign(buffer); - api_log!("Device::create_buffer -> {id:?}"); + // NB: Any change done through the raw buffer handle will not be + // recorded in the replay + #[cfg(feature = "trace")] + if let Some(trace) = device.trace.lock().as_mut() { + trace.add(trace::Action::CreateBuffer(fid.id(), desc.clone())); + } - return (id, None); - }; + let buffer = device.create_buffer_from_hal(Box::new(hal_buffer), desc); - log::error!("Device::create_buffer error: {error}"); + let id = fid.assign(buffer); + api_log!("Device::create_buffer -> {id:?}"); - let id = fid.assign_error(); - (id, Some(error)) + (id, None) } pub fn texture_destroy(&self, texture_id: id::TextureId) -> Result<(), resource::DestroyError> { @@ -474,10 +423,7 @@ impl Global { let hub = &self.hub; - let texture = hub - .textures - .get(texture_id) - .map_err(|_| resource::DestroyError::Invalid)?; + let texture = hub.textures.get(texture_id).get()?; #[cfg(feature = "trace")] if let Some(trace) = texture.device.trace.lock().as_mut() { @@ -493,9 +439,10 @@ impl Global { let hub = &self.hub; - if let Some(_texture) = hub.textures.unregister(texture_id) { - #[cfg(feature = "trace")] - if let Some(t) = _texture.device.trace.lock().as_mut() { + let _texture = 
hub.textures.remove(texture_id); + #[cfg(feature = "trace")] + if let Ok(texture) = _texture.get() { + if let Some(t) = texture.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTexture(texture_id)); } } @@ -514,11 +461,9 @@ impl Global { let fid = hub.texture_views.prepare(texture_id.backend(), id_in); let error = 'error: { - let texture = match hub.textures.get(texture_id) { + let texture = match hub.textures.get(texture_id).get() { Ok(texture) => texture, - Err(_) => { - break 'error resource::CreateTextureViewError::InvalidTextureId(texture_id) - } + Err(e) => break 'error e.into(), }; let device = &texture.device; @@ -536,7 +481,7 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(view); + let id = fid.assign(Fallible::Valid(view)); api_log!("Texture::create_view({texture_id:?}) -> {id:?}"); @@ -544,7 +489,7 @@ impl Global { }; log::error!("Texture::create_view({texture_id:?}) error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -557,9 +502,11 @@ impl Global { let hub = &self.hub; - if let Some(_view) = hub.texture_views.unregister(texture_view_id) { - #[cfg(feature = "trace")] - if let Some(t) = _view.device.trace.lock().as_mut() { + let _view = hub.texture_views.remove(texture_view_id); + + #[cfg(feature = "trace")] + if let Ok(view) = _view.get() { + if let Some(t) = view.device.trace.lock().as_mut() { t.add(trace::Action::DestroyTextureView(texture_view_id)); } } @@ -578,10 +525,7 @@ impl Global { let fid = hub.samplers.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -593,13 +537,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(sampler); + 
let id = fid.assign(Fallible::Valid(sampler)); api_log!("Device::create_sampler -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -609,9 +553,11 @@ impl Global { let hub = &self.hub; - if let Some(_sampler) = hub.samplers.unregister(sampler_id) { - #[cfg(feature = "trace")] - if let Some(t) = _sampler.device.trace.lock().as_mut() { + let _sampler = hub.samplers.remove(sampler_id); + + #[cfg(feature = "trace")] + if let Ok(sampler) = _sampler.get() { + if let Some(t) = sampler.device.trace.lock().as_mut() { t.add(trace::Action::DestroySampler(sampler_id)); } } @@ -632,10 +578,7 @@ impl Global { let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -666,14 +609,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout.clone()); + let id = fid.assign(Fallible::Valid(layout.clone())); api_log!("Device::create_bind_group_layout -> {id:?}"); return (id, None); }; - let fid = hub.bind_group_layouts.prepare(device_id.backend(), id_in); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -683,9 +625,11 @@ impl Global { let hub = &self.hub; - if let Some(_layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + let _layout = hub.bind_group_layouts.remove(bind_group_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroupLayout(bind_group_layout_id)); 
} } @@ -706,10 +650,7 @@ impl Global { let fid = hub.pipeline_layouts.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -720,19 +661,13 @@ impl Global { let bind_group_layouts_guard = hub.bind_group_layouts.read(); desc.bind_group_layouts .iter() - .map(|bgl_id| { - bind_group_layouts_guard.get_owned(*bgl_id).map_err(|_| { - binding_model::CreatePipelineLayoutError::InvalidBindGroupLayoutId( - *bgl_id, - ) - }) - }) + .map(|bgl_id| bind_group_layouts_guard.get(*bgl_id).get()) .collect::, _>>() }; let bind_group_layouts = match bind_group_layouts { Ok(bind_group_layouts) => bind_group_layouts, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let desc = binding_model::ResolvedPipelineLayoutDescriptor { @@ -746,12 +681,12 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(layout); + let id = fid.assign(Fallible::Valid(layout)); api_log!("Device::create_pipeline_layout -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -760,9 +695,12 @@ impl Global { api_log!("PipelineLayout::drop {pipeline_layout_id:?}"); let hub = &self.hub; - if let Some(_layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) { - #[cfg(feature = "trace")] - if let Some(t) = _layout.device.trace.lock().as_mut() { + + let _layout = hub.pipeline_layouts.remove(pipeline_layout_id); + + #[cfg(feature = "trace")] + if let Ok(layout) = _layout.get() { + if let Some(t) = layout.device.trace.lock().as_mut() { t.add(trace::Action::DestroyPipelineLayout(pipeline_layout_id)); } } @@ -780,76 +718,77 @@ impl Global { let fid = hub.bind_groups.prepare(device_id.backend(), id_in); let 
error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { trace.add(trace::Action::CreateBindGroup(fid.id(), desc.clone())); } - let layout = match hub.bind_group_layouts.get(desc.layout) { + let layout = match hub.bind_group_layouts.get(desc.layout).get() { Ok(layout) => layout, - Err(..) => break 'error binding_model::CreateBindGroupError::InvalidLayout, + Err(e) => break 'error e.into(), }; - fn map_entry<'a>( + fn resolve_entry<'a>( e: &BindGroupEntry<'a>, - buffer_storage: &Storage, - sampler_storage: &Storage, - texture_view_storage: &Storage, + buffer_storage: &Storage>, + sampler_storage: &Storage>, + texture_view_storage: &Storage>, ) -> Result, binding_model::CreateBindGroupError> { - let map_buffer = |bb: &BufferBinding| { + let resolve_buffer = |bb: &BufferBinding| { buffer_storage - .get_owned(bb.buffer_id) + .get(bb.buffer_id) + .get() .map(|buffer| ResolvedBufferBinding { buffer, offset: bb.offset, size: bb.size, }) - .map_err(|_| { - binding_model::CreateBindGroupError::InvalidBufferId(bb.buffer_id) - }) + .map_err(binding_model::CreateBindGroupError::from) }; - let map_sampler = |id: &id::SamplerId| { + let resolve_sampler = |id: &id::SamplerId| { sampler_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidSamplerId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; - let map_view = |id: &id::TextureViewId| { + let resolve_view = |id: &id::TextureViewId| { texture_view_storage - .get_owned(*id) - .map_err(|_| binding_model::CreateBindGroupError::InvalidTextureViewId(*id)) + .get(*id) + .get() + .map_err(binding_model::CreateBindGroupError::from) }; let resource = match e.resource { BindingResource::Buffer(ref buffer) => { - 
ResolvedBindingResource::Buffer(map_buffer(buffer)?) + ResolvedBindingResource::Buffer(resolve_buffer(buffer)?) } BindingResource::BufferArray(ref buffers) => { let buffers = buffers .iter() - .map(map_buffer) + .map(resolve_buffer) .collect::, _>>()?; ResolvedBindingResource::BufferArray(Cow::Owned(buffers)) } BindingResource::Sampler(ref sampler) => { - ResolvedBindingResource::Sampler(map_sampler(sampler)?) + ResolvedBindingResource::Sampler(resolve_sampler(sampler)?) } BindingResource::SamplerArray(ref samplers) => { let samplers = samplers .iter() - .map(map_sampler) + .map(resolve_sampler) .collect::, _>>()?; ResolvedBindingResource::SamplerArray(Cow::Owned(samplers)) } BindingResource::TextureView(ref view) => { - ResolvedBindingResource::TextureView(map_view(view)?) + ResolvedBindingResource::TextureView(resolve_view(view)?) } BindingResource::TextureViewArray(ref views) => { - let views = views.iter().map(map_view).collect::, _>>()?; + let views = views + .iter() + .map(resolve_view) + .collect::, _>>()?; ResolvedBindingResource::TextureViewArray(Cow::Owned(views)) } }; @@ -865,7 +804,7 @@ impl Global { let sampler_guard = hub.samplers.read(); desc.entries .iter() - .map(|e| map_entry(e, &buffer_guard, &sampler_guard, &texture_view_guard)) + .map(|e| resolve_entry(e, &buffer_guard, &sampler_guard, &texture_view_guard)) .collect::, _>>() }; let entries = match entries { @@ -884,14 +823,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(bind_group); + let id = fid.assign(Fallible::Valid(bind_group)); api_log!("Device::create_bind_group -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -901,8 +840,10 @@ impl Global { let hub = &self.hub; - if let Some(_bind_group) = hub.bind_groups.unregister(bind_group_id) { - #[cfg(feature = "trace")] + let _bind_group = hub.bind_groups.remove(bind_group_id); + + #[cfg(feature = "trace")] + 
if let Ok(_bind_group) = _bind_group.get() { if let Some(t) = _bind_group.device.trace.lock().as_mut() { t.add(trace::Action::DestroyBindGroup(bind_group_id)); } @@ -939,10 +880,7 @@ impl Global { let fid = hub.shader_modules.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -981,14 +919,14 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1014,10 +952,7 @@ impl Global { let fid = hub.shader_modules.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1035,14 +970,14 @@ impl Global { Ok(shader) => shader, Err(e) => break 'error e, }; - let id = fid.assign(shader); + let id = fid.assign(Fallible::Valid(shader)); api_log!("Device::create_shader_module_spirv -> {id:?}"); return (id, None); }; log::error!("Device::create_shader_module_spirv error: {error}"); - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1052,12 +987,13 @@ impl Global { let hub = &self.hub; - if let Some(shader_module) = hub.shader_modules.unregister(shader_module_id) { - #[cfg(feature = "trace")] + let _shader_module = 
hub.shader_modules.remove(shader_module_id); + + #[cfg(feature = "trace")] + if let Ok(shader_module) = _shader_module.get() { if let Some(t) = shader_module.device.trace.lock().as_mut() { t.add(trace::Action::DestroyShaderModule(shader_module_id)); } - drop(shader_module) } } @@ -1075,12 +1011,9 @@ impl Global { id_in.map(|id| id.into_command_buffer_id()), ); - let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId, - }; + let device = self.hub.devices.get(device_id); + let error = 'error: { let command_buffer = match device.create_command_encoder(&desc.label) { Ok(command_buffer) => command_buffer, Err(e) => break 'error e, @@ -1091,7 +1024,7 @@ impl Global { return (id.into_command_encoder_id(), None); }; - let id = fid.assign_error(); + let id = fid.assign(Arc::new(CommandBuffer::new_invalid(&device, &desc.label))); (id.into_command_encoder_id(), Some(error)) } @@ -1101,12 +1034,9 @@ impl Global { let hub = &self.hub; - if let Some(cmd_buf) = hub + let _cmd_buf = hub .command_buffers - .unregister(command_encoder_id.into_command_buffer_id()) - { - cmd_buf.data.lock().as_mut().unwrap().encoder.discard(); - } + .remove(command_encoder_id.into_command_buffer_id()); } pub fn command_buffer_drop(&self, command_buffer_id: id::CommandBufferId) { @@ -1147,14 +1077,7 @@ impl Global { .prepare(bundle_encoder.parent().backend(), id_in); let error = 'error: { - let device = match hub.devices.get(bundle_encoder.parent()) { - Ok(device) => device, - Err(_) => { - break 'error command::RenderBundleError::from_device_error( - DeviceError::InvalidDeviceId, - ); - } - }; + let device = self.hub.devices.get(bundle_encoder.parent()); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1175,13 +1098,13 @@ impl Global { Err(e) => break 'error e, }; - let id = fid.assign(render_bundle); + let id = fid.assign(Fallible::Valid(render_bundle)); 
api_log!("RenderBundleEncoder::finish -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1191,9 +1114,11 @@ impl Global { let hub = &self.hub; - if let Some(_bundle) = hub.render_bundles.unregister(render_bundle_id) { - #[cfg(feature = "trace")] - if let Some(t) = _bundle.device.trace.lock().as_mut() { + let _bundle = hub.render_bundles.remove(render_bundle_id); + + #[cfg(feature = "trace")] + if let Ok(bundle) = _bundle.get() { + if let Some(t) = bundle.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderBundle(render_bundle_id)); } } @@ -1211,10 +1136,7 @@ impl Global { let fid = hub.query_sets.prepare(device_id.backend(), id_in); let error = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1229,13 +1151,13 @@ impl Global { Err(err) => break 'error err, }; - let id = fid.assign(query_set); + let id = fid.assign(Fallible::Valid(query_set)); api_log!("Device::create_query_set -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1245,9 +1167,11 @@ impl Global { let hub = &self.hub; - if let Some(_query_set) = hub.query_sets.unregister(query_set_id) { - #[cfg(feature = "trace")] - if let Some(trace) = _query_set.device.trace.lock().as_mut() { + let _query_set = hub.query_sets.remove(query_set_id); + + #[cfg(feature = "trace")] + if let Ok(query_set) = _query_set.get() { + if let Some(trace) = query_set.device.trace.lock().as_mut() { trace.add(trace::Action::DestroyQuerySet(query_set_id)); } } @@ -1279,10 +1203,7 @@ impl Global { break 'error 
pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1295,37 +1216,30 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) .transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateRenderPipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let vertex = { let module = hub .shader_modules .get(desc.vertex.stage.module) - .map_err(|_| pipeline::CreateRenderPipelineError::Stage { + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::VERTEX, - error: crate::validation::StageError::InvalidModule, + error: e.into(), }); let module = match module { Ok(module) => module, @@ -1347,12 +1261,14 @@ impl Global { }; let fragment = if let Some(ref state) = desc.fragment { - let module = hub.shader_modules.get(state.stage.module).map_err(|_| { - pipeline::CreateRenderPipelineError::Stage { + let module = hub + .shader_modules + .get(state.stage.module) + .get() + .map_err(|e| pipeline::CreateRenderPipelineError::Stage { stage: wgt::ShaderStages::FRAGMENT, - error: crate::validation::StageError::InvalidModule, - } - }); + error: e.into(), + }); let module = match module { Ok(module) => module, Err(e) => break 'error e, @@ -1406,7 +1322,7 @@ impl Global { 
let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. @@ -1418,29 +1334,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_render_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. 
if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1462,16 +1378,15 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(pipeline_id.backend(), id_in); + let error = 'error: { - let pipeline = match hub.render_pipelines.get(pipeline_id) { + let pipeline = match hub.render_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1479,10 +1394,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1492,9 +1404,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.render_pipelines.unregister(render_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.render_pipelines.remove(render_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyRenderPipeline(render_pipeline_id)); } } @@ -1526,10 +1440,7 @@ impl Global { break 'error 
pipeline::ImplicitLayoutError::MissingImplicitPipelineIds.into(); } - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1542,34 +1453,23 @@ impl Global { let layout = desc .layout - .map(|layout| { - hub.pipeline_layouts - .get(layout) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout) - }) + .map(|layout| hub.pipeline_layouts.get(layout).get()) .transpose(); let layout = match layout { Ok(layout) => layout, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; let cache = desc .cache - .map(|cache| { - hub.pipeline_caches - .get(cache) - .map_err(|_| pipeline::CreateComputePipelineError::InvalidCache) - }) + .map(|cache| hub.pipeline_caches.get(cache).get()) .transpose(); let cache = match cache { Ok(cache) => cache, - Err(e) => break 'error e, + Err(e) => break 'error e.into(), }; - let module = hub - .shader_modules - .get(desc.stage.module) - .map_err(|_| crate::validation::StageError::InvalidModule); + let module = hub.shader_modules.get(desc.stage.module).get(); let module = match module { Ok(module) => module, Err(e) => break 'error e.into(), @@ -1608,7 +1508,7 @@ impl Global { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert(ids.root_id, pipeline.layout.clone()); + pipeline_layout_guard.insert(ids.root_id, Fallible::Valid(pipeline.layout.clone())); let mut group_ids = ids.group_ids.iter(); // NOTE: If the first iterator is longer than the second, the `.zip()` impl will still advance the // the first iterator before realizing that the second iterator has finished. 
@@ -1620,29 +1520,29 @@ impl Global { .iter() .zip(&mut group_ids) { - bgl_guard.insert(*bgl_id, bgl.clone()); + bgl_guard.insert(*bgl_id, Fallible::Valid(bgl.clone())); } for bgl_id in group_ids { - bgl_guard.insert_error(*bgl_id); + bgl_guard.insert(*bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } - let id = fid.assign(pipeline); + let id = fid.assign(Fallible::Valid(pipeline)); api_log!("Device::create_compute_pipeline -> {id:?}"); return (id, None); }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); // We also need to assign errors to the implicit pipeline layout and the // implicit bind group layouts. if let Some(ids) = implicit_context { let mut pipeline_layout_guard = hub.pipeline_layouts.write(); let mut bgl_guard = hub.bind_group_layouts.write(); - pipeline_layout_guard.insert_error(ids.root_id); + pipeline_layout_guard.insert(ids.root_id, Fallible::Invalid(Arc::new(String::new()))); for bgl_id in ids.group_ids { - bgl_guard.insert_error(bgl_id); + bgl_guard.insert(bgl_id, Fallible::Invalid(Arc::new(String::new()))); } } @@ -1662,17 +1562,16 @@ impl Global { ) { let hub = &self.hub; + let fid = hub.bind_group_layouts.prepare(pipeline_id.backend(), id_in); + let error = 'error: { - let pipeline = match hub.compute_pipelines.get(pipeline_id) { + let pipeline = match hub.compute_pipelines.get(pipeline_id).get() { Ok(pipeline) => pipeline, - Err(_) => break 'error binding_model::GetBindGroupLayoutError::InvalidPipeline, + Err(e) => break 'error e.into(), }; let id = match pipeline.layout.bind_group_layouts.get(index as usize) { - Some(bg) => hub - .bind_group_layouts - .prepare(pipeline_id.backend(), id_in) - .assign(bg.clone()), + Some(bg) => fid.assign(Fallible::Valid(bg.clone())), None => { break 'error binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index) } @@ -1681,10 +1580,7 @@ impl Global { return (id, None); }; - let id = hub - .bind_group_layouts - 
.prepare(pipeline_id.backend(), id_in) - .assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(String::new()))); (id, Some(error)) } @@ -1694,9 +1590,11 @@ impl Global { let hub = &self.hub; - if let Some(_pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) { - #[cfg(feature = "trace")] - if let Some(t) = _pipeline.device.trace.lock().as_mut() { + let _pipeline = hub.compute_pipelines.remove(compute_pipeline_id); + + #[cfg(feature = "trace")] + if let Ok(pipeline) = _pipeline.get() { + if let Some(t) = pipeline.device.trace.lock().as_mut() { t.add(trace::Action::DestroyComputePipeline(compute_pipeline_id)); } } @@ -1720,11 +1618,7 @@ impl Global { let fid = hub.pipeline_caches.prepare(device_id.backend(), id_in); let error: pipeline::CreatePipelineCacheError = 'error: { - let device = match hub.devices.get(device_id) { - Ok(device) => device, - // TODO: Handle error properly - Err(crate::storage::InvalidId) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1737,7 +1631,7 @@ impl Global { let cache = unsafe { device.create_pipeline_cache(desc) }; match cache { Ok(cache) => { - let id = fid.assign(cache); + let id = fid.assign(Fallible::Valid(cache)); api_log!("Device::create_pipeline_cache -> {id:?}"); return (id, None); } @@ -1745,7 +1639,7 @@ impl Global { } }; - let id = fid.assign_error(); + let id = fid.assign(Fallible::Invalid(Arc::new(desc.label.to_string()))); (id, Some(error)) } @@ -1756,12 +1650,13 @@ impl Global { let hub = &self.hub; - if let Some(cache) = hub.pipeline_caches.unregister(pipeline_cache_id) { - #[cfg(feature = "trace")] + let _cache = hub.pipeline_caches.remove(pipeline_cache_id); + + #[cfg(feature = "trace")] + if let Ok(cache) = _cache.get() { if let Some(t) = cache.device.trace.lock().as_mut() { t.add(trace::Action::DestroyPipelineCache(pipeline_cache_id)); } - drop(cache) } 
} @@ -1892,13 +1787,7 @@ impl Global { // User callbacks must not be called while we are holding locks. let user_callbacks; { - let hub = &self.hub; - let surface_guard = self.surfaces.read(); - - let device = match hub.devices.get(device_id) { - Ok(device) => device, - Err(_) => break 'error DeviceError::InvalidDeviceId.into(), - }; + let device = self.hub.devices.get(device_id); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { @@ -1909,10 +1798,7 @@ impl Global { break 'error e.into(); } - let surface = match surface_guard.get(surface_id) { - Ok(surface) => surface, - Err(_) => break 'error E::InvalidSurface, - }; + let surface = self.surfaces.get(surface_id); let caps = match surface.get_capabilities(&device.adapter) { Ok(caps) => caps, @@ -2003,7 +1889,9 @@ impl Global { hal::SurfaceError::Outdated | hal::SurfaceError::Lost => { E::InvalidSurface } - hal::SurfaceError::Device(error) => E::Device(error.into()), + hal::SurfaceError::Device(error) => { + E::Device(device.handle_hal_error(error)) + } hal::SurfaceError::Other(message) => { log::error!("surface configuration failed: {}", message); E::InvalidSurface @@ -2037,11 +1925,7 @@ impl Global { ) -> Result { api_log!("Device::poll {maintain:?}"); - let hub = &self.hub; - let device = hub - .devices - .get(device_id) - .map_err(|_| DeviceError::InvalidDeviceId)?; + let device = self.hub.devices.get(device_id); let DevicePoll { closures, @@ -2148,38 +2032,26 @@ impl Global { Ok(all_queue_empty) } - pub fn device_start_capture(&self, id: DeviceId) { + pub fn device_start_capture(&self, device_id: DeviceId) { api_log!("Device::start_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { device.raw().start_capture() }; + if !device.is_valid() { + return; } + unsafe { device.raw().start_capture() }; } - pub fn device_stop_capture(&self, id: DeviceId) { + pub fn 
device_stop_capture(&self, device_id: DeviceId) { api_log!("Device::stop_capture"); - let hub = &self.hub; + let device = self.hub.devices.get(device_id); - if let Ok(device) = hub.devices.get(id) { - if !device.is_valid() { - return; - } - unsafe { device.raw().stop_capture() }; + if !device.is_valid() { + return; } - } - - // This is a test-only function to force the device into an - // invalid state by inserting an error value in its place in - // the registry. - pub fn device_make_invalid(&self, device_id: DeviceId) { - let hub = &self.hub; - hub.devices.force_replace_with_error(device_id); + unsafe { device.raw().stop_capture() }; } pub fn pipeline_cache_get_data(&self, id: id::PipelineCacheId) -> Option> { @@ -2187,7 +2059,7 @@ impl Global { api_log!("PipelineCache::get_data"); let hub = &self.hub; - if let Ok(cache) = hub.pipeline_caches.get(id) { + if let Ok(cache) = hub.pipeline_caches.get(id).get() { // TODO: Is this check needed? if !cache.device.is_valid() { return None; @@ -2215,22 +2087,20 @@ impl Global { profiling::scope!("Device::drop"); api_log!("Device::drop {device_id:?}"); - let hub = &self.hub; - if let Some(device) = hub.devices.unregister(device_id) { - let device_lost_closure = device.lock_life().device_lost_closure.take(); - if let Some(closure) = device_lost_closure { - closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); - } + let device = self.hub.devices.remove(device_id); + let device_lost_closure = device.lock_life().device_lost_closure.take(); + if let Some(closure) = device_lost_closure { + closure.call(DeviceLostReason::Dropped, String::from("Device dropped.")); + } - // The things `Device::prepare_to_die` takes care are mostly - // unnecessary here. We know our queue is empty, so we don't - // need to wait for submissions or triage them. We know we were - // just polled, so `life_tracker.free_resources` is empty. 
- debug_assert!(device.lock_life().queue_empty()); - device.pending_writes.lock().deactivate(); + // The things `Device::prepare_to_die` takes care are mostly + // unnecessary here. We know our queue is empty, so we don't + // need to wait for submissions or triage them. We know we were + // just polled, so `life_tracker.free_resources` is empty. + debug_assert!(device.lock_life().queue_empty()); + device.pending_writes.lock().deactivate(); - drop(device); - } + drop(device); } // This closure will be called exactly once during "lose the device", @@ -2240,71 +2110,47 @@ impl Global { device_id: DeviceId, device_lost_closure: DeviceLostClosure, ) { - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - let mut life_tracker = device.lock_life(); - if let Some(existing_closure) = life_tracker.device_lost_closure.take() { - // It's important to not hold the lock while calling the closure. - drop(life_tracker); - existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); - life_tracker = device.lock_life(); - } - life_tracker.device_lost_closure = Some(device_lost_closure); - } else { - // No device? Okay. Just like we have to call any existing closure - // before we drop it, we need to call this closure before we exit - // this function, because there's no device that is ever going to - // call it. - device_lost_closure.call(DeviceLostReason::DeviceInvalid, "".to_string()); + let device = self.hub.devices.get(device_id); + + let mut life_tracker = device.lock_life(); + if let Some(existing_closure) = life_tracker.device_lost_closure.take() { + // It's important to not hold the lock while calling the closure. 
+ drop(life_tracker); + existing_closure.call(DeviceLostReason::ReplacedCallback, "".to_string()); + life_tracker = device.lock_life(); } + life_tracker.device_lost_closure = Some(device_lost_closure); } pub fn device_destroy(&self, device_id: DeviceId) { api_log!("Device::destroy {device_id:?}"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(device_id) { - // Follow the steps at - // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. - // It's legal to call destroy multiple times, but if the device - // is already invalid, there's nothing more to do. There's also - // no need to return an error. - if !device.is_valid() { - return; - } + let device = self.hub.devices.get(device_id); - // The last part of destroy is to lose the device. The spec says - // delay that until all "currently-enqueued operations on any - // queue on this device are completed." This is accomplished by - // setting valid to false, and then relying upon maintain to - // check for empty queues and a DeviceLostClosure. At that time, - // the DeviceLostClosure will be called with "destroyed" as the - // reason. - device.valid.store(false, Ordering::Relaxed); + // Follow the steps at + // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy. + // It's legal to call destroy multiple times, but if the device + // is already invalid, there's nothing more to do. There's also + // no need to return an error. + if !device.is_valid() { + return; } - } - - pub fn device_mark_lost(&self, device_id: DeviceId, message: &str) { - api_log!("Device::mark_lost {device_id:?}"); - - let hub = &self.hub; - if let Ok(device) = hub.devices.get(device_id) { - device.lose(message); - } + // The last part of destroy is to lose the device. The spec says + // delay that until all "currently-enqueued operations on any + // queue on this device are completed." This is accomplished by + // setting valid to false, and then relying upon maintain to + // check for empty queues and a DeviceLostClosure. 
At that time, + // the DeviceLostClosure will be called with "destroyed" as the + // reason. + device.valid.store(false, Ordering::Release); } pub fn device_get_internal_counters(&self, device_id: DeviceId) -> wgt::InternalCounters { - let hub = &self.hub; - if let Ok(device) = hub.devices.get(device_id) { - wgt::InternalCounters { - hal: device.get_hal_counters(), - core: wgt::CoreCounters {}, - } - } else { - Default::default() + let device = self.hub.devices.get(device_id); + wgt::InternalCounters { + hal: device.get_hal_counters(), + core: wgt::CoreCounters {}, } } @@ -2312,21 +2158,15 @@ impl Global { &self, device_id: DeviceId, ) -> Option { - let hub = &self.hub; - hub.devices - .get(device_id) - .ok() - .and_then(|device| device.generate_allocator_report()) + let device = self.hub.devices.get(device_id); + device.generate_allocator_report() } pub fn queue_drop(&self, queue_id: QueueId) { profiling::scope!("Queue::drop"); api_log!("Queue::drop {queue_id:?}"); - let hub = &self.hub; - if let Some(queue) = hub.queues.unregister(queue_id) { - drop(queue); - } + self.hub.queues.remove(queue_id); } pub fn buffer_map_async( @@ -2342,9 +2182,9 @@ impl Global { let hub = &self.hub; let op_and_err = 'error: { - let buffer = match hub.buffers.get(buffer_id) { + let buffer = match hub.buffers.get(buffer_id).get() { Ok(buffer) => buffer, - Err(_) => break 'error Some((op, BufferAccessError::InvalidBufferId(buffer_id))), + Err(e) => break 'error Some((op, e.into())), }; buffer.map_async(offset, size, op).err() @@ -2375,10 +2215,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; { let snatch_guard = buffer.device.snatchable_lock.read(); @@ -2451,10 +2288,7 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?; + let buffer = 
hub.buffers.get(buffer_id).get()?; let snatch_guard = buffer.device.snatchable_lock.read(); buffer.check_destroyed(&snatch_guard)?; diff --git a/third_party/rust/wgpu-core/src/device/mod.rs b/third_party/rust/wgpu-core/src/device/mod.rs index 777dd262abcc2..97ce639fe7425 100644 --- a/third_party/rust/wgpu-core/src/device/mod.rs +++ b/third_party/rust/wgpu-core/src/device/mod.rs @@ -308,7 +308,7 @@ fn map_buffer( let raw_buffer = buffer.try_raw(snatch_guard)?; let mapping = unsafe { raw.map_buffer(raw_buffer, offset..offset + size) - .map_err(DeviceError::from)? + .map_err(|e| buffer.device.handle_hal_error(e))? }; if !mapping.is_coherent && kind == HostMap::Read { @@ -336,19 +336,41 @@ fn map_buffer( let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) }; - for uninitialized in buffer - .initialization_status - .write() - .drain(offset..(size + offset)) + // We can't call flush_mapped_ranges in this case, so we can't drain the uninitialized ranges either + if !mapping.is_coherent + && kind == HostMap::Read + && !buffer.usage.contains(wgt::BufferUsages::MAP_WRITE) { - // The mapping's pointer is already offset, however we track the - // uninitialized range relative to the buffer's start. - let fill_range = - (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; - mapped[fill_range].fill(0); - - if !mapping.is_coherent && kind == HostMap::Read { - unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) }; + for uninitialized in buffer + .initialization_status + .write() + .uninitialized(offset..(size + offset)) + { + // The mapping's pointer is already offset, however we track the + // uninitialized range relative to the buffer's start. 
+ let fill_range = + (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; + mapped[fill_range].fill(0); + } + } else { + for uninitialized in buffer + .initialization_status + .write() + .drain(offset..(size + offset)) + { + // The mapping's pointer is already offset, however we track the + // uninitialized range relative to the buffer's start. + let fill_range = + (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize; + mapped[fill_range].fill(0); + + // NOTE: This is only possible when MAPPABLE_PRIMARY_BUFFERS is enabled. + if !mapping.is_coherent + && kind == HostMap::Read + && buffer.usage.contains(wgt::BufferUsages::MAP_WRITE) + { + unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) }; + } } } @@ -392,19 +414,20 @@ pub enum DeviceError { OutOfMemory, #[error("Creation of a resource failed for a reason other than running out of memory.")] ResourceCreationFailed, - #[error("DeviceId is invalid")] - InvalidDeviceId, #[error(transparent)] DeviceMismatch(#[from] Box), } -impl From for DeviceError { - fn from(error: hal::DeviceError) -> Self { +impl DeviceError { + /// Only use this function in contexts where there is no `Device`. + /// + /// Use [`Device::handle_hal_error`] otherwise. 
+ pub fn from_hal(error: hal::DeviceError) -> Self { match error { - hal::DeviceError::Lost => DeviceError::Lost, - hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory, - hal::DeviceError::ResourceCreationFailed => DeviceError::ResourceCreationFailed, - hal::DeviceError::Unexpected => DeviceError::Lost, + hal::DeviceError::Lost => Self::Lost, + hal::DeviceError::OutOfMemory => Self::OutOfMemory, + hal::DeviceError::ResourceCreationFailed => Self::ResourceCreationFailed, + hal::DeviceError::Unexpected => Self::Lost, } } } @@ -439,15 +462,11 @@ impl ImplicitPipelineIds<'_> { root_id: hub .pipeline_layouts .prepare(backend, Some(self.root_id)) - .into_id(), + .id(), group_ids: self .group_ids .iter() - .map(|id_in| { - hub.bind_group_layouts - .prepare(backend, Some(*id_in)) - .into_id() - }) + .map(|id_in| hub.bind_group_layouts.prepare(backend, Some(*id_in)).id()) .collect(), } } diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs index e516e0dac7876..1710c05919427 100644 --- a/third_party/rust/wgpu-core/src/device/queue.rs +++ b/third_party/rust/wgpu-core/src/device/queue.rs @@ -4,7 +4,8 @@ use crate::{ api_log, command::{ extract_texture_selector, validate_linear_texture_data, validate_texture_copy_range, - ClearError, CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError, + ClearError, CommandAllocator, CommandBuffer, CommandEncoderError, CopySide, + ImageCopyTexture, TransferError, }, conv, device::{DeviceError, WaitIdleError}, @@ -16,8 +17,8 @@ use crate::{ lock::RwLockWriteGuard, resource::{ Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError, - DestroyedTexture, FlushedStagingBuffer, Labeled, ParentDevice, ResourceErrorIdent, - StagingBuffer, Texture, TextureInner, Trackable, + DestroyedTexture, FlushedStagingBuffer, InvalidResourceError, Labeled, ParentDevice, + ResourceErrorIdent, StagingBuffer, Texture, TextureInner, Trackable, }, 
resource_log, track::{self, Tracker, TrackerIndex}, @@ -269,17 +270,20 @@ impl PendingWrites { fn pre_submit( &mut self, command_allocator: &CommandAllocator, - device: &dyn hal::DynDevice, - queue: &dyn hal::DynQueue, + device: &Device, + queue: &Queue, ) -> Result, DeviceError> { if self.is_recording { let pending_buffers = mem::take(&mut self.dst_buffers); let pending_textures = mem::take(&mut self.dst_textures); - let cmd_buf = unsafe { self.command_encoder.end_encoding()? }; + let cmd_buf = unsafe { self.command_encoder.end_encoding() } + .map_err(|e| device.handle_hal_error(e))?; self.is_recording = false; - let new_encoder = command_allocator.acquire_encoder(device, queue)?; + let new_encoder = command_allocator + .acquire_encoder(device.raw(), queue.raw()) + .map_err(|e| device.handle_hal_error(e))?; let encoder = EncoderInFlight { raw: mem::replace(&mut self.command_encoder, new_encoder), @@ -318,15 +322,9 @@ impl PendingWrites { } } -#[derive(Clone, Debug, Error)] -#[error("Queue is invalid")] -pub struct InvalidQueue; - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueWriteError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -335,13 +333,13 @@ pub enum QueueWriteError { MemoryInitFailure(#[from] ClearError), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum QueueSubmitError { - #[error("QueueId is invalid")] - InvalidQueueId, #[error(transparent)] Queue(#[from] DeviceError), #[error(transparent)] @@ -356,6 +354,10 @@ pub enum QueueSubmitError { SurfaceUnconfigured, #[error("GPU got stuck :(")] StuckGpu, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), + #[error(transparent)] + CommandEncoder(#[from] CommandEncoderError), } //TODO: move out common parts of write_xxx. 
@@ -373,15 +375,9 @@ impl Global { let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -441,10 +437,7 @@ impl Global { profiling::scope!("Queue::create_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -452,7 +445,7 @@ impl Global { let ptr = unsafe { staging_buffer.ptr() }; let fid = hub.staging_buffers.prepare(queue_id.backend(), id_in); - let id = fid.assign(Arc::new(staging_buffer)); + let id = fid.assign(staging_buffer); resource_log!("Queue::create_staging_buffer {id:?}"); Ok((id, ptr)) @@ -468,18 +461,11 @@ impl Global { profiling::scope!("Queue::write_staging_buffer"); let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; - let staging_buffer = hub - .staging_buffers - .unregister(staging_buffer_id) - .and_then(Arc::into_inner) - .ok_or_else(|| QueueWriteError::Transfer(TransferError::InvalidBufferId(buffer_id)))?; + let staging_buffer = hub.staging_buffers.remove(staging_buffer_id); let mut pending_writes = device.pending_writes.lock(); @@ -512,10 +498,7 @@ impl Global { profiling::scope!("Queue::validate_write_buffer"); let hub = &self.hub; - let buffer = hub - .buffers - .get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let buffer = hub.buffers.get(buffer_id).get()?; self.queue_validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?; @@ -558,10 +541,7 @@ impl Global { ) -> Result<(), QueueWriteError> { let hub = &self.hub; - let dst = hub - .buffers - 
.get(buffer_id) - .map_err(|_| TransferError::InvalidBufferId(buffer_id))?; + let dst = hub.buffers.get(buffer_id).get()?; let transition = { let mut trackers = device.trackers.lock(); @@ -618,10 +598,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -641,10 +618,7 @@ impl Global { return Ok(()); } - let dst = hub - .textures - .get(destination.texture) - .map_err(|_| TransferError::InvalidTextureId(destination.texture))?; + let dst = hub.textures.get(destination.texture).get()?; dst.same_device_as(queue.as_ref())?; @@ -735,12 +709,6 @@ impl Global { let snatch_guard = device.snatchable_lock.read(); - // Re-get `dst` immutably here, so that the mutable borrow of the - // `texture_guard.get` above ends in time for the `clear_texture` - // call above. Since we've held `texture_guard` the whole time, we know - // the texture hasn't gone away in the mean time, so we can unwrap. 
- let dst = hub.textures.get(destination.texture).unwrap(); - let dst_raw = dst.try_raw(&snatch_guard)?; let (block_width, block_height) = dst.desc.format.block_dimensions(); @@ -859,10 +827,7 @@ impl Global { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueWriteError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -890,7 +855,7 @@ impl Global { let src_width = source.source.width(); let src_height = source.source.height(); - let dst = hub.textures.get(destination.texture).unwrap(); + let dst = hub.textures.get(destination.texture).get()?; if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) { return Err( @@ -1069,10 +1034,7 @@ impl Global { let (submit_index, callbacks) = { let hub = &self.hub; - let queue = hub - .queues - .get(queue_id) - .map_err(|_| QueueSubmitError::InvalidQueueId)?; + let queue = hub.queues.get(queue_id); let device = &queue.device; @@ -1093,115 +1055,77 @@ impl Global { let mut submit_surface_textures_owned = FastHashMap::default(); { - let mut command_buffer_guard = hub.command_buffers.write(); + let command_buffer_guard = hub.command_buffers.read(); if !command_buffer_ids.is_empty() { profiling::scope!("prepare"); + let mut first_error = None; + //TODO: if multiple command buffers are submitted, we can re-use the last // native command buffer of the previous chain instead of always creating // a temporary one, since the chains are not finished. // finish all the command buffers first - for &cmb_id in command_buffer_ids { + for command_buffer_id in command_buffer_ids { profiling::scope!("process command buffer"); // we reset the used surface textures every time we use // it, so make sure to set_size on it. 
used_surface_textures.set_size(device.tracker_indices.textures.size()); + let command_buffer = command_buffer_guard.get(*command_buffer_id); + + // Note that we are required to invalidate all command buffers in both the success and failure paths. + // This is why we `continue` and don't early return via `?`. #[allow(unused_mut)] - let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) { - Ok(cmdbuf) => cmdbuf, - Err(_) => continue, - }; + let mut cmd_buf_data = command_buffer.try_take(); #[cfg(feature = "trace")] if let Some(ref mut trace) = *device.trace.lock() { - trace.add(Action::Submit( - submit_index, - cmdbuf - .data - .lock() - .as_mut() - .unwrap() - .commands - .take() - .unwrap(), - )); - } - - cmdbuf.same_device_as(queue.as_ref())?; - - if !cmdbuf.is_finished() { - let cmdbuf = Arc::into_inner(cmdbuf).expect( - "Command buffer cannot be destroyed because is still in use", - ); - device.destroy_command_buffer(cmdbuf); - continue; + if let Ok(ref mut cmd_buf_data) = cmd_buf_data { + trace.add(Action::Submit( + submit_index, + cmd_buf_data.commands.take().unwrap(), + )); + } } - { - profiling::scope!("check resource state"); - - let cmd_buf_data = cmdbuf.data.lock(); - let cmd_buf_trackers = &cmd_buf_data.as_ref().unwrap().trackers; - - // update submission IDs - { - profiling::scope!("buffers"); - for buffer in cmd_buf_trackers.buffers.used_resources() { - buffer.check_destroyed(&snatch_guard)?; - - match *buffer.map_state.lock() { - BufferMapState::Idle => (), - _ => { - return Err(QueueSubmitError::BufferStillMapped( - buffer.error_ident(), - )) - } - } + let mut baked = match cmd_buf_data { + Ok(cmd_buf_data) => { + let res = validate_command_buffer( + &command_buffer, + &queue, + &cmd_buf_data, + &snatch_guard, + &mut submit_surface_textures_owned, + &mut used_surface_textures, + ); + if let Err(err) = res { + first_error.get_or_insert(err); + cmd_buf_data.destroy(&command_buffer.device); + continue; } + 
cmd_buf_data.into_baked_commands() } - { - profiling::scope!("textures"); - for texture in cmd_buf_trackers.textures.used_resources() { - let should_extend = match texture.try_inner(&snatch_guard)? { - TextureInner::Native { .. } => false, - TextureInner::Surface { .. } => { - // Compare the Arcs by pointer as Textures don't implement Eq. - submit_surface_textures_owned - .insert(Arc::as_ptr(&texture), texture.clone()); - - true - } - }; - if should_extend { - unsafe { - used_surface_textures - .merge_single( - &texture, - None, - hal::TextureUses::PRESENT, - ) - .unwrap(); - }; - } - } + Err(err) => { + first_error.get_or_insert(err.into()); + continue; } + }; + + if first_error.is_some() { + continue; } - let mut baked = cmdbuf.from_arc_into_baked(); // execute resource transitions unsafe { - baked - .encoder - .begin_encoding(hal_label( - Some("(wgpu internal) Transit"), - device.instance_flags, - )) - .map_err(DeviceError::from)? - }; + baked.encoder.begin_encoding(hal_label( + Some("(wgpu internal) Transit"), + device.instance_flags, + )) + } + .map_err(|e| device.handle_hal_error(e))?; //Note: locking the trackers has to be done after the storages let mut trackers = device.trackers.lock(); @@ -1224,14 +1148,12 @@ impl Global { // but here we have a command encoder by hand, so it's easier to use it. if !used_surface_textures.is_empty() { unsafe { - baked - .encoder - .begin_encoding(hal_label( - Some("(wgpu internal) Present"), - device.instance_flags, - )) - .map_err(DeviceError::from)? 
- }; + baked.encoder.begin_encoding(hal_label( + Some("(wgpu internal) Present"), + device.instance_flags, + )) + } + .map_err(|e| device.handle_hal_error(e))?; let texture_barriers = trackers .textures .set_from_usage_scope_and_drain_transitions( @@ -1256,6 +1178,10 @@ impl Global { pending_textures: FastHashMap::default(), }); } + + if let Some(first_error) = first_error { + return Err(first_error); + } } } @@ -1299,7 +1225,7 @@ impl Global { } if let Some(pending_execution) = - pending_writes.pre_submit(&device.command_allocator, device.raw(), queue.raw())? + pending_writes.pre_submit(&device.command_allocator, device, &queue)? { active_executions.insert(0, pending_execution); } @@ -1324,15 +1250,13 @@ impl Global { } unsafe { - queue - .raw() - .submit( - &hal_command_buffers, - &submit_surface_textures, - (fence.as_mut(), submit_index), - ) - .map_err(DeviceError::from)?; + queue.raw().submit( + &hal_command_buffers, + &submit_surface_textures, + (fence.as_mut(), submit_index), + ) } + .map_err(|e| device.handle_hal_error(e))?; // Advance the successful submission index. 
device @@ -1372,27 +1296,71 @@ impl Global { Ok(submit_index) } - pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> Result { - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => Ok(unsafe { queue.raw().get_timestamp_period() }), - Err(_) => Err(InvalidQueue), - } + pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 { + let queue = self.hub.queues.get(queue_id); + unsafe { queue.raw().get_timestamp_period() } } pub fn queue_on_submitted_work_done( &self, queue_id: QueueId, closure: SubmittedWorkDoneClosure, - ) -> Result<(), InvalidQueue> { + ) { api_log!("Queue::on_submitted_work_done {queue_id:?}"); //TODO: flush pending writes - let hub = &self.hub; - match hub.queues.get(queue_id) { - Ok(queue) => queue.device.lock_life().add_work_done_closure(closure), - Err(_) => return Err(InvalidQueue), + let queue = self.hub.queues.get(queue_id); + queue.device.lock_life().add_work_done_closure(closure); + } +} + +fn validate_command_buffer( + command_buffer: &CommandBuffer, + queue: &Queue, + cmd_buf_data: &crate::command::CommandBufferMutable, + snatch_guard: &crate::snatch::SnatchGuard<'_>, + submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc>, + used_surface_textures: &mut track::TextureUsageScope, +) -> Result<(), QueueSubmitError> { + command_buffer.same_device_as(queue)?; + cmd_buf_data.check_finished()?; + + { + profiling::scope!("check resource state"); + + { + profiling::scope!("buffers"); + for buffer in cmd_buf_data.trackers.buffers.used_resources() { + buffer.check_destroyed(snatch_guard)?; + + match *buffer.map_state.lock() { + BufferMapState::Idle => (), + _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())), + } + } + } + { + profiling::scope!("textures"); + for texture in cmd_buf_data.trackers.textures.used_resources() { + let should_extend = match texture.try_inner(snatch_guard)? { + TextureInner::Native { .. } => false, + TextureInner::Surface { .. 
} => { + // Compare the Arcs by pointer as Textures don't implement Eq. + submit_surface_textures_owned + .insert(Arc::as_ptr(&texture), texture.clone()); + + true + } + }; + if should_extend { + unsafe { + used_surface_textures + .merge_single(&texture, None, hal::TextureUses::PRESENT) + .unwrap(); + }; + } + } } - Ok(()) } + Ok(()) } diff --git a/third_party/rust/wgpu-core/src/device/resource.rs b/third_party/rust/wgpu-core/src/device/resource.rs index 45a5a58f4ef05..1cd8ef0fe75e9 100644 --- a/third_party/rust/wgpu-core/src/device/resource.rs +++ b/third_party/rust/wgpu-core/src/device/resource.rs @@ -21,7 +21,7 @@ use crate::{ pipeline, pool::ResourcePool, resource::{ - self, Buffer, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, + self, Buffer, Fallible, Labeled, ParentDevice, QuerySet, Sampler, StagingBuffer, Texture, TextureView, TextureViewNotRenderableReason, TrackingData, }, resource_log, @@ -39,7 +39,9 @@ use once_cell::sync::OnceCell; use smallvec::SmallVec; use thiserror::Error; -use wgt::{DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension}; +use wgt::{ + math::align_to, DeviceLostReason, TextureFormat, TextureSampleType, TextureViewDimension, +}; use std::{ borrow::Cow, @@ -225,31 +227,29 @@ impl Device { desc: &DeviceDescriptor, trace_path: Option<&std::path::Path>, instance_flags: wgt::InstanceFlags, - ) -> Result { + ) -> Result { #[cfg(not(feature = "trace"))] if let Some(_) = trace_path { log::error!("Feature 'trace' is not enabled"); } - let fence = - unsafe { raw_device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?; + let fence = unsafe { raw_device.create_fence() }.map_err(DeviceError::from_hal)?; let command_allocator = command::CommandAllocator::new(); let pending_encoder = command_allocator .acquire_encoder(raw_device.as_ref(), raw_queue) - .map_err(|_| CreateDeviceError::OutOfMemory)?; + .map_err(DeviceError::from_hal)?; let mut pending_writes = PendingWrites::new(pending_encoder); // 
Create zeroed buffer used for texture clears. let zero_buffer = unsafe { - raw_device - .create_buffer(&hal::BufferDescriptor { - label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags), - size: ZERO_BUFFER_SIZE, - usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, - memory_flags: hal::MemoryFlags::empty(), - }) - .map_err(DeviceError::from)? - }; + raw_device.create_buffer(&hal::BufferDescriptor { + label: hal_label(Some("(wgpu internal) zero init buffer"), instance_flags), + size: ZERO_BUFFER_SIZE, + usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST, + memory_flags: hal::MemoryFlags::empty(), + }) + } + .map_err(DeviceError::from_hal)?; pending_writes.activate(); unsafe { pending_writes @@ -337,6 +337,18 @@ impl Device { } } + pub fn handle_hal_error(&self, error: hal::DeviceError) -> DeviceError { + match error { + hal::DeviceError::OutOfMemory => {} + hal::DeviceError::Lost + | hal::DeviceError::ResourceCreationFailed + | hal::DeviceError::Unexpected => { + self.lose(&error.to_string()); + } + } + DeviceError::from_hal(error) + } + pub(crate) fn release_queue(&self, queue: Box) { assert!(self.queue_to_drop.set(queue).is_ok()); } @@ -439,11 +451,8 @@ impl Device { wgt::Maintain::Wait => self .last_successful_submission_index .load(Ordering::Acquire), - wgt::Maintain::Poll => unsafe { - self.raw() - .get_fence_value(fence.as_ref()) - .map_err(DeviceError::from)? - }, + wgt::Maintain::Poll => unsafe { self.raw().get_fence_value(fence.as_ref()) } + .map_err(|e| self.handle_hal_error(e))?, }; // If necessary, wait for that submission to complete. @@ -451,8 +460,8 @@ impl Device { unsafe { self.raw() .wait(fence.as_ref(), submission_index, CLEANUP_WAIT_MS) - .map_err(DeviceError::from)? 
- }; + } + .map_err(|e| self.handle_hal_error(e))?; } log::trace!("Device::maintain: waiting for submission index {submission_index}"); @@ -586,7 +595,8 @@ impl Device { usage, memory_flags: hal::MemoryFlags::empty(), }; - let buffer = unsafe { self.raw().create_buffer(&hal_desc) }.map_err(DeviceError::from)?; + let buffer = + unsafe { self.raw().create_buffer(&hal_desc) }.map_err(|e| self.handle_hal_error(e))?; let buffer = Buffer { raw: Snatchable::new(buffer), @@ -662,6 +672,8 @@ impl Device { .describe_format_features(desc.format) .map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))?; + unsafe { self.raw().add_raw_texture(&*hal_texture) }; + let texture = Texture::new( self, resource::TextureInner::Native { raw: hal_texture }, @@ -682,11 +694,13 @@ impl Device { Ok(texture) } - pub fn create_buffer_from_hal( + pub(crate) fn create_buffer_from_hal( self: &Arc, hal_buffer: Box, desc: &resource::BufferDescriptor, - ) -> Arc { + ) -> Fallible { + unsafe { self.raw().add_raw_buffer(&*hal_buffer) }; + let buffer = Buffer { raw: Snatchable::new(hal_buffer), device: self.clone(), @@ -709,7 +723,7 @@ impl Device { .buffers .insert_single(&buffer, hal::BufferUses::empty()); - buffer + Fallible::Valid(buffer) } pub(crate) fn create_texture( @@ -929,11 +943,8 @@ impl Device { view_formats: hal_view_formats, }; - let raw_texture = unsafe { - self.raw() - .create_texture(&hal_desc) - .map_err(DeviceError::from)? 
- }; + let raw_texture = unsafe { self.raw().create_texture(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let clear_mode = if hal_usage .intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET) @@ -976,7 +987,7 @@ impl Device { unsafe { self.raw().create_texture_view(raw_texture.as_ref(), &desc) } - .map_err(DeviceError::from)?, + .map_err(|e| self.handle_hal_error(e))?, )); }; } @@ -1282,11 +1293,8 @@ impl Device { range: resolved_range, }; - let raw = unsafe { - self.raw() - .create_texture_view(texture_raw, &hal_desc) - .map_err(|_| resource::CreateTextureViewError::OutOfMemory)? - }; + let raw = unsafe { self.raw().create_texture_view(texture_raw, &hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let selector = TextureSelector { mips: desc.range.base_mip_level..mip_level_end, @@ -1417,11 +1425,8 @@ impl Device { border_color: desc.border_color, }; - let raw = unsafe { - self.raw() - .create_sampler(&hal_desc) - .map_err(DeviceError::from)? - }; + let raw = unsafe { self.raw().create_sampler(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let sampler = Sampler { raw: ManuallyDrop::new(raw), @@ -1545,7 +1550,7 @@ impl Device { Err(error) => { return Err(match error { hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) + pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error)) } hal::ShaderError::Compilation(ref msg) => { log::error!("Shader error: {}", msg); @@ -1586,7 +1591,7 @@ impl Device { Err(error) => { return Err(match error { hal::ShaderError::Device(error) => { - pipeline::CreateShaderModuleError::Device(error.into()) + pipeline::CreateShaderModuleError::Device(self.handle_hal_error(error)) } hal::ShaderError::Compilation(ref msg) => { log::error!("Shader error: {}", msg); @@ -1618,7 +1623,8 @@ impl Device { let encoder = self .command_allocator - .acquire_encoder(self.raw(), queue.raw())?; + .acquire_encoder(self.raw(), queue.raw()) + .map_err(|e| 
self.handle_hal_error(e))?; let command_buffer = command::CommandBuffer::new(encoder, self, label); @@ -1629,7 +1635,7 @@ impl Device { /// Generate information about late-validated buffer bindings for pipelines. //TODO: should this be combined with `get_introspection_bind_group_layouts` in some way? - pub(crate) fn make_late_sized_buffer_groups( + fn make_late_sized_buffer_groups( shader_binding_sizes: &FastHashMap, layout: &binding_model::PipelineLayout, ) -> ArrayVec { @@ -1850,11 +1856,9 @@ impl Device { flags: bgl_flags, entries: &hal_bindings, }; - let raw = unsafe { - self.raw() - .create_bind_group_layout(&hal_desc) - .map_err(DeviceError::from)? - }; + + let raw = unsafe { self.raw().create_bind_group_layout(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; let mut count_validator = binding_model::BindingTypeMaxCountValidator::default(); for entry in entry_map.values() { @@ -1881,8 +1885,8 @@ impl Device { Ok(bgl) } - pub(crate) fn create_buffer_binding<'a>( - self: &Arc, + fn create_buffer_binding<'a>( + &self, bb: &'a binding_model::ResolvedBufferBinding, binding: u32, decl: &wgt::BindGroupLayoutEntry, @@ -1890,7 +1894,6 @@ impl Device { dynamic_binding_info: &mut Vec, late_buffer_binding_sizes: &mut FastHashMap, used: &mut BindGroupStates, - limits: &wgt::Limits, snatch_guard: &'a SnatchGuard<'a>, ) -> Result, binding_model::CreateBindGroupError> { @@ -1915,7 +1918,7 @@ impl Device { wgt::BufferBindingType::Uniform => ( wgt::BufferUsages::UNIFORM, hal::BufferUses::UNIFORM, - limits.max_uniform_buffer_binding_size, + self.limits.max_uniform_buffer_binding_size, ), wgt::BufferBindingType::Storage { read_only } => ( wgt::BufferUsages::STORAGE, @@ -1924,12 +1927,12 @@ impl Device { } else { hal::BufferUses::STORAGE_READ_WRITE }, - limits.max_storage_buffer_binding_size, + self.limits.max_storage_buffer_binding_size, ), }; let (align, align_limit_name) = - binding_model::buffer_binding_type_alignment(limits, binding_ty); + 
binding_model::buffer_binding_type_alignment(&self.limits, binding_ty); if bb.offset % align as u64 != 0 { return Err(Error::UnalignedBufferOffset( bb.offset, @@ -2005,10 +2008,21 @@ impl Device { late_buffer_binding_sizes.insert(binding, late_size); } + // This was checked against the device's alignment requirements above, + // which should always be a multiple of `COPY_BUFFER_ALIGNMENT`. assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0); + + // `wgpu_hal` only restricts shader access to bound buffer regions with + // a certain resolution. For the sake of lazy initialization, round up + // the size of the bound range to reflect how much of the buffer is + // actually going to be visible to the shader. + let bounds_check_alignment = + binding_model::buffer_binding_type_bounds_check_alignment(&self.alignments, binding_ty); + let visible_size = align_to(bind_size, bounds_check_alignment); + used_buffer_ranges.extend(buffer.initialization_status.read().create_action( buffer, - bb.offset..bb.offset + bind_size, + bb.offset..bb.offset + visible_size, MemoryInitKind::NeedsInitializedMemory, )); @@ -2020,7 +2034,7 @@ impl Device { } fn create_sampler_binding<'a>( - self: &Arc, + &self, used: &mut BindGroupStates, binding: u32, decl: &wgt::BindGroupLayoutEntry, @@ -2068,8 +2082,8 @@ impl Device { Ok(sampler.raw()) } - pub(crate) fn create_texture_binding<'a>( - self: &Arc, + fn create_texture_binding<'a>( + &self, binding: u32, decl: &wgt::BindGroupLayoutEntry, view: &'a Arc, @@ -2167,7 +2181,6 @@ impl Device { &mut dynamic_binding_info, &mut late_buffer_binding_sizes, &mut used, - &self.limits, &snatch_guard, )?; @@ -2189,7 +2202,6 @@ impl Device { &mut dynamic_binding_info, &mut late_buffer_binding_sizes, &mut used, - &self.limits, &snatch_guard, )?; hal_buffers.push(bb); @@ -2276,11 +2288,8 @@ impl Device { textures: &hal_textures, acceleration_structures: &[], }; - let raw = unsafe { - self.raw() - .create_bind_group(&hal_desc) - .map_err(DeviceError::from)? 
- }; + let raw = unsafe { self.raw().create_bind_group(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; // collect in the order of BGL iteration let late_buffer_binding_sizes = layout @@ -2325,7 +2334,7 @@ impl Device { Ok(bind_group) } - pub(crate) fn check_array_binding( + fn check_array_binding( features: wgt::Features, count: Option, num_bindings: usize, @@ -2358,8 +2367,8 @@ impl Device { Ok(()) } - pub(crate) fn texture_use_parameters( - self: &Arc, + fn texture_use_parameters( + &self, binding: u32, decl: &wgt::BindGroupLayoutEntry, view: &TextureView, @@ -2574,11 +2583,8 @@ impl Device { push_constant_ranges: desc.push_constant_ranges.as_ref(), }; - let raw = unsafe { - self.raw() - .create_pipeline_layout(&hal_desc) - .map_err(DeviceError::from)? - }; + let raw = unsafe { self.raw().create_pipeline_layout(&hal_desc) } + .map_err(|e| self.handle_hal_error(e))?; drop(raw_bind_group_layouts); @@ -2732,7 +2738,7 @@ impl Device { unsafe { self.raw().create_compute_pipeline(&pipeline_desc) }.map_err( |err| match err { hal::PipelineError::Device(error) => { - pipeline::CreateComputePipelineError::Device(error.into()) + pipeline::CreateComputePipelineError::Device(self.handle_hal_error(error)) } hal::PipelineError::Linkage(_stages, msg) => { pipeline::CreateComputePipelineError::Internal(msg) @@ -3312,7 +3318,7 @@ impl Device { unsafe { self.raw().create_render_pipeline(&pipeline_desc) }.map_err( |err| match err { hal::PipelineError::Device(error) => { - pipeline::CreateRenderPipelineError::Device(error.into()) + pipeline::CreateRenderPipelineError::Device(self.handle_hal_error(error)) } hal::PipelineError::Linkage(stage, msg) => { pipeline::CreateRenderPipelineError::Internal { stage, error: msg } @@ -3435,7 +3441,9 @@ impl Device { }; let raw = match unsafe { self.raw().create_pipeline_cache(&cache_desc) } { Ok(raw) => raw, - Err(e) => return Err(e.into()), + Err(e) => match e { + hal::PipelineCacheError::Device(e) => return 
Err(self.handle_hal_error(e).into()), + }, }; let cache = pipeline::PipelineCache { device: self.clone(), @@ -3449,10 +3457,7 @@ impl Device { Ok(cache) } - pub(crate) fn get_texture_format_features( - &self, - format: TextureFormat, - ) -> wgt::TextureFormatFeatures { + fn get_texture_format_features(&self, format: TextureFormat) -> wgt::TextureFormatFeatures { // Variant of adapter.get_texture_format_features that takes device features into account use wgt::TextureFormatFeatureFlags as tfsc; let mut format_features = self.adapter.get_texture_format_features(format); @@ -3466,7 +3471,7 @@ impl Device { format_features } - pub(crate) fn describe_format_features( + fn describe_format_features( &self, format: TextureFormat, ) -> Result { @@ -3495,9 +3500,11 @@ impl Device { submission_index: crate::SubmissionIndex, ) -> Result<(), DeviceError> { let fence = self.fence.read(); - let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref())? }; + let last_done_index = unsafe { self.raw().get_fence_value(fence.as_ref()) } + .map_err(|e| self.handle_hal_error(e))?; if last_done_index < submission_index { - unsafe { self.raw().wait(fence.as_ref(), submission_index, !0)? }; + unsafe { self.raw().wait(fence.as_ref(), submission_index, !0) } + .map_err(|e| self.handle_hal_error(e))?; drop(fence); let closures = self .lock_life() @@ -3556,7 +3563,7 @@ impl Device { Ok(query_set) } - pub(crate) fn lose(&self, message: &str) { + fn lose(&self, message: &str) { // Follow the steps at https://gpuweb.github.io/gpuweb/#lose-the-device. // Mark the device explicitly as invalid. 
This is checked in various @@ -3623,16 +3630,6 @@ impl Device { } impl Device { - pub(crate) fn destroy_command_buffer(&self, mut cmd_buf: command::CommandBuffer) { - let mut baked = cmd_buf.extract_baked_commands(); - unsafe { - baked.encoder.reset_all(baked.list); - } - unsafe { - self.raw().destroy_command_encoder(baked.encoder); - } - } - /// Wait for idle and remove resources that we can, before we die. pub(crate) fn prepare_to_die(&self) { self.pending_writes.lock().deactivate(); diff --git a/third_party/rust/wgpu-core/src/global.rs b/third_party/rust/wgpu-core/src/global.rs index 4d79a81e3baf7..bb672612e4954 100644 --- a/third_party/rust/wgpu-core/src/global.rs +++ b/third_party/rust/wgpu-core/src/global.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{ hal_api::HalApi, hub::{Hub, HubReport}, @@ -23,7 +25,7 @@ impl GlobalReport { pub struct Global { pub instance: Instance, - pub(crate) surfaces: Registry, + pub(crate) surfaces: Registry>, pub(crate) hub: Hub, } diff --git a/third_party/rust/wgpu-core/src/hub.rs b/third_party/rust/wgpu-core/src/hub.rs index 5cbb736301f30..f4e8b9c756c58 100644 --- a/third_party/rust/wgpu-core/src/hub.rs +++ b/third_party/rust/wgpu-core/src/hub.rs @@ -107,10 +107,10 @@ use crate::{ instance::{Adapter, Surface}, pipeline::{ComputePipeline, PipelineCache, RenderPipeline, ShaderModule}, registry::{Registry, RegistryReport}, - resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, + resource::{Buffer, Fallible, QuerySet, Sampler, StagingBuffer, Texture, TextureView}, storage::{Element, Storage}, }; -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; #[derive(Debug, PartialEq, Eq)] pub struct HubReport { @@ -162,24 +162,24 @@ impl HubReport { /// /// [`A::hub(global)`]: HalApi::hub pub struct Hub { - pub(crate) adapters: Registry, - pub(crate) devices: Registry, - pub(crate) queues: Registry, - pub(crate) pipeline_layouts: Registry, - pub(crate) shader_modules: Registry, - pub(crate) 
bind_group_layouts: Registry, - pub(crate) bind_groups: Registry, - pub(crate) command_buffers: Registry, - pub(crate) render_bundles: Registry, - pub(crate) render_pipelines: Registry, - pub(crate) compute_pipelines: Registry, - pub(crate) pipeline_caches: Registry, - pub(crate) query_sets: Registry, - pub(crate) buffers: Registry, + pub(crate) adapters: Registry>, + pub(crate) devices: Registry>, + pub(crate) queues: Registry>, + pub(crate) pipeline_layouts: Registry>, + pub(crate) shader_modules: Registry>, + pub(crate) bind_group_layouts: Registry>, + pub(crate) bind_groups: Registry>, + pub(crate) command_buffers: Registry>, + pub(crate) render_bundles: Registry>, + pub(crate) render_pipelines: Registry>, + pub(crate) compute_pipelines: Registry>, + pub(crate) pipeline_caches: Registry>, + pub(crate) query_sets: Registry>, + pub(crate) buffers: Registry>, pub(crate) staging_buffers: Registry, - pub(crate) textures: Registry, - pub(crate) texture_views: Registry, - pub(crate) samplers: Registry, + pub(crate) textures: Registry>, + pub(crate) texture_views: Registry>, + pub(crate) samplers: Registry>, } impl Hub { @@ -206,7 +206,7 @@ impl Hub { } } - pub(crate) fn clear(&self, surface_guard: &Storage) { + pub(crate) fn clear(&self, surface_guard: &Storage>) { let mut devices = self.devices.write(); for element in devices.map.iter() { if let Element::Occupied(ref device, _) = *element { diff --git a/third_party/rust/wgpu-core/src/init_tracker/mod.rs b/third_party/rust/wgpu-core/src/init_tracker/mod.rs index ccaac1e16f2bd..15a79bf52026e 100644 --- a/third_party/rust/wgpu-core/src/init_tracker/mod.rs +++ b/third_party/rust/wgpu-core/src/init_tracker/mod.rs @@ -65,6 +65,35 @@ pub(crate) struct InitTracker { uninitialized_ranges: UninitializedRangeVec, } +pub(crate) struct UninitializedIter<'a, Idx: fmt::Debug + Ord + Copy> { + uninitialized_ranges: &'a UninitializedRangeVec, + drain_range: Range, + next_index: usize, +} + +impl<'a, Idx> Iterator for 
UninitializedIter<'a, Idx> +where + Idx: fmt::Debug + Ord + Copy, +{ + type Item = Range; + + fn next(&mut self) -> Option { + self.uninitialized_ranges + .get(self.next_index) + .and_then(|range| { + if range.start < self.drain_range.end { + self.next_index += 1; + Some( + range.start.max(self.drain_range.start) + ..range.end.min(self.drain_range.end), + ) + } else { + None + } + }) + } +} + pub(crate) struct InitTrackerDrain<'a, Idx: fmt::Debug + Ord + Copy> { uninitialized_ranges: &'a mut UninitializedRangeVec, drain_range: Range, @@ -190,6 +219,18 @@ where }) } + // Returns an iterator over the uninitialized ranges in a query range. + pub(crate) fn uninitialized(&mut self, drain_range: Range) -> UninitializedIter { + let index = self + .uninitialized_ranges + .partition_point(|r| r.end <= drain_range.start); + UninitializedIter { + drain_range, + uninitialized_ranges: &self.uninitialized_ranges, + next_index: index, + } + } + // Drains uninitialized ranges in a query range. pub(crate) fn drain(&mut self, drain_range: Range) -> InitTrackerDrain { let index = self diff --git a/third_party/rust/wgpu-core/src/instance.rs b/third_party/rust/wgpu-core/src/instance.rs index b3ce11fd177c5..014fbc7bc762c 100644 --- a/third_party/rust/wgpu-core/src/instance.rs +++ b/third_party/rust/wgpu-core/src/instance.rs @@ -4,7 +4,7 @@ use std::{borrow::Cow, collections::HashMap}; use crate::hub::Hub; use crate::{ api_log, - device::{queue::Queue, resource::Device, DeviceDescriptor}, + device::{queue::Queue, resource::Device, DeviceDescriptor, DeviceError}, global::Global, hal_api::HalApi, id::{markers, AdapterId, DeviceId, Id, Marker, QueueId, SurfaceId}, @@ -272,20 +272,19 @@ impl Adapter { ) -> Result<(Arc, Arc), RequestDeviceError> { api_log!("Adapter::create_device"); - if let Ok(device) = Device::new( + let device = Device::new( hal_device.device, hal_device.queue.as_ref(), self, desc, trace_path, instance_flags, - ) { - let device = Arc::new(device); - let queue = 
Arc::new(Queue::new(device.clone(), hal_device.queue)); - device.set_queue(&queue); - return Ok((device, queue)); - } - Err(RequestDeviceError::OutOfMemory) + )?; + + let device = Arc::new(device); + let queue = Arc::new(Queue::new(device.clone(), hal_device.queue)); + device.set_queue(&queue); + Ok((device, queue)) } #[allow(clippy::type_complexity)] @@ -338,12 +337,7 @@ impl Adapter { &desc.memory_hints, ) } - .map_err(|err| match err { - hal::DeviceError::Lost => RequestDeviceError::DeviceLost, - hal::DeviceError::OutOfMemory => RequestDeviceError::OutOfMemory, - hal::DeviceError::ResourceCreationFailed => RequestDeviceError::Internal, - hal::DeviceError::Unexpected => RequestDeviceError::DeviceLost, - })?; + .map_err(DeviceError::from_hal)?; self.create_device_and_queue_from_hal(open, desc, instance_flags, trace_path) } @@ -352,22 +346,9 @@ impl Adapter { crate::impl_resource_type!(Adapter); crate::impl_storage_item!(Adapter); -#[derive(Clone, Debug, Error)] -#[non_exhaustive] -pub enum IsSurfaceSupportedError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, -} - #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum GetSurfaceSupportError { - #[error("Invalid adapter")] - InvalidAdapter, - #[error("Invalid surface")] - InvalidSurface, #[error("Surface is not supported by the adapter")] Unsupported, } @@ -377,18 +358,12 @@ pub enum GetSurfaceSupportError { /// Error when requesting a device from the adaptor #[non_exhaustive] pub enum RequestDeviceError { - #[error("Parent adapter is invalid")] - InvalidAdapter, - #[error("Connection to device was lost during initialization")] - DeviceLost, - #[error("Device initialization failed due to implementation specific errors")] - Internal, + #[error(transparent)] + Device(#[from] DeviceError), #[error(transparent)] LimitsExceeded(#[from] FailedLimit), #[error("Device has no queue supporting graphics")] NoGraphicsQueue, - #[error("Not enough memory left to request 
device")] - OutOfMemory, #[error("Unsupported features were requested: {0:?}")] UnsupportedFeature(wgt::Features), } @@ -413,18 +388,12 @@ impl AdapterInputs<'_, M> { } } -#[derive(Clone, Debug, Error)] -#[error("Adapter is invalid")] -pub struct InvalidAdapter; - #[derive(Clone, Debug, Error)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[non_exhaustive] pub enum RequestAdapterError { #[error("No suitable adapter found")] NotFound, - #[error("Surface {0:?} is invalid")] - InvalidSurface(SurfaceId), } #[derive(Clone, Debug, Error)] @@ -624,9 +593,9 @@ impl Global { api_log!("Surface::drop {id:?}"); - let surface = self.surfaces.unregister(id); - let surface = Arc::into_inner(surface.unwrap()) - .expect("Surface cannot be destroyed because is still in use"); + let surface = self.surfaces.remove(id); + let surface = + Arc::into_inner(surface).expect("Surface cannot be destroyed because is still in use"); if let Some(present) = surface.presentation.lock().take() { for (&backend, surface) in &surface.surface_per_backend { @@ -741,14 +710,7 @@ impl Global { } } - let compatible_surface = desc - .compatible_surface - .map(|id| { - self.surfaces - .get(id) - .map_err(|_| RequestAdapterError::InvalidSurface(id)) - }) - .transpose()?; + let compatible_surface = desc.compatible_surface.map(|id| self.surfaces.get(id)); let compatible_surface = compatible_surface.as_ref().map(|surface| surface.as_ref()); let mut device_types = Vec::new(); @@ -879,73 +841,51 @@ impl Global { id } - pub fn adapter_get_info( - &self, - adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.info.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_get_info(&self, adapter_id: AdapterId) -> wgt::AdapterInfo { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.info.clone() } pub fn adapter_get_texture_format_features( &self, adapter_id: AdapterId, format: wgt::TextureFormat, - ) -> Result { - 
self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.get_texture_format_features(format)) - .map_err(|_| InvalidAdapter) + ) -> wgt::TextureFormatFeatures { + let adapter = self.hub.adapters.get(adapter_id); + adapter.get_texture_format_features(format) } - pub fn adapter_features(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.features) - .map_err(|_| InvalidAdapter) + pub fn adapter_features(&self, adapter_id: AdapterId) -> wgt::Features { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.features } - pub fn adapter_limits(&self, adapter_id: AdapterId) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.limits.clone()) - .map_err(|_| InvalidAdapter) + pub fn adapter_limits(&self, adapter_id: AdapterId) -> wgt::Limits { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.limits.clone() } pub fn adapter_downlevel_capabilities( &self, adapter_id: AdapterId, - ) -> Result { - self.hub - .adapters - .get(adapter_id) - .map(|adapter| adapter.raw.capabilities.downlevel.clone()) - .map_err(|_| InvalidAdapter) + ) -> wgt::DownlevelCapabilities { + let adapter = self.hub.adapters.get(adapter_id); + adapter.raw.capabilities.downlevel.clone() } pub fn adapter_get_presentation_timestamp( &self, adapter_id: AdapterId, - ) -> Result { - let hub = &self.hub; - - let adapter = hub.adapters.get(adapter_id).map_err(|_| InvalidAdapter)?; - - Ok(unsafe { adapter.raw.adapter.get_presentation_timestamp() }) + ) -> wgt::PresentationTimestamp { + let adapter = self.hub.adapters.get(adapter_id); + unsafe { adapter.raw.adapter.get_presentation_timestamp() } } pub fn adapter_drop(&self, adapter_id: AdapterId) { profiling::scope!("Adapter::drop"); api_log!("Adapter::drop {adapter_id:?}"); - let hub = &self.hub; - hub.adapters.unregister(adapter_id); + self.hub.adapters.remove(adapter_id); } } @@ -957,7 +897,7 @@ impl Global { 
trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Adapter::request_device"); api_log!("Adapter::request_device"); @@ -965,29 +905,17 @@ impl Global { let device_fid = self.hub.devices.prepare(backend, device_id_in); let queue_fid = self.hub.queues.prepare(backend, queue_id_in); - let error = 'error: { - let adapter = match self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = - match adapter.create_device_and_queue(desc, self.instance.flags, trace_path) { - Ok((device, queue)) => (device, queue), - Err(e) => break 'error e, - }; - - let device_id = device_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = + adapter.create_device_and_queue(desc, self.instance.flags, trace_path)?; - let queue_id = queue_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); + let device_id = device_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queue_fid.assign(queue); + resource_log!("Created Queue {:?}", queue_id); - let device_id = device_fid.assign_error(); - let queue_id = queue_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } /// # Safety @@ -1002,40 +930,28 @@ impl Global { trace_path: Option<&std::path::Path>, device_id_in: Option, queue_id_in: Option, - ) -> (DeviceId, QueueId, Option) { + ) -> Result<(DeviceId, QueueId), RequestDeviceError> { profiling::scope!("Global::create_device_from_hal"); let backend = adapter_id.backend(); let devices_fid = self.hub.devices.prepare(backend, device_id_in); let queues_fid = self.hub.queues.prepare(backend, queue_id_in); - let error = 'error: { - let adapter = match 
self.hub.adapters.get(adapter_id) { - Ok(adapter) => adapter, - Err(_) => break 'error RequestDeviceError::InvalidAdapter, - }; - let (device, queue) = match adapter.create_device_and_queue_from_hal( - hal_device, - desc, - self.instance.flags, - trace_path, - ) { - Ok(device) => device, - Err(e) => break 'error e, - }; - - let device_id = devices_fid.assign(device); - resource_log!("Created Device {:?}", device_id); + let adapter = self.hub.adapters.get(adapter_id); + let (device, queue) = adapter.create_device_and_queue_from_hal( + hal_device, + desc, + self.instance.flags, + trace_path, + )?; - let queue_id = queues_fid.assign(queue); - resource_log!("Created Queue {:?}", queue_id); + let device_id = devices_fid.assign(device); + resource_log!("Created Device {:?}", device_id); - return (device_id, queue_id, None); - }; + let queue_id = queues_fid.assign(queue); + resource_log!("Created Queue {:?}", queue_id); - let device_id = devices_fid.assign_error(); - let queue_id = queues_fid.assign_error(); - (device_id, queue_id, Some(error)) + Ok((device_id, queue_id)) } } diff --git a/third_party/rust/wgpu-core/src/lock/observing.rs b/third_party/rust/wgpu-core/src/lock/observing.rs index a5e02c54bdd0a..afda1ad574977 100644 --- a/third_party/rust/wgpu-core/src/lock/observing.rs +++ b/third_party/rust/wgpu-core/src/lock/observing.rs @@ -78,6 +78,15 @@ impl Mutex { } } +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.inner, f).map_err(|_| ()) + } +} + impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { type Target = T; @@ -369,16 +378,16 @@ impl ObservationLog { self.write_location(new_location); self.write_action(&Action::Acquisition { older_rank: older_lock.rank.bit.number(), - older_location: older_lock.location as *const _ as usize, + older_location: addr(older_lock.location), newer_rank: new_rank.bit.number(), - newer_location: new_location as 
*const _ as usize, + newer_location: addr(new_location), }); } fn write_location(&mut self, location: &'static Location<'static>) { if self.locations_seen.insert(location) { self.write_action(&Action::Location { - address: location as *const _ as usize, + address: addr(location), file: location.file(), line: location.line(), column: location.column(), @@ -473,3 +482,8 @@ impl LockRankSet { self.bits().trailing_zeros() } } + +/// Convenience for `std::ptr::from_ref(t) as usize`. +fn addr(t: &T) -> usize { + std::ptr::from_ref(t) as usize +} diff --git a/third_party/rust/wgpu-core/src/lock/vanilla.rs b/third_party/rust/wgpu-core/src/lock/vanilla.rs index 9a35b6d9d8146..51e472b118b24 100644 --- a/third_party/rust/wgpu-core/src/lock/vanilla.rs +++ b/third_party/rust/wgpu-core/src/lock/vanilla.rs @@ -30,6 +30,15 @@ impl Mutex { } } +impl<'a, T> MutexGuard<'a, T> { + pub fn try_map(s: Self, f: F) -> Result, ()> + where + F: FnOnce(&mut T) -> Option<&mut U>, + { + parking_lot::MutexGuard::try_map(s.0, f).map_err(|_| ()) + } +} + impl<'a, T> std::ops::Deref for MutexGuard<'a, T> { type Target = T; diff --git a/third_party/rust/wgpu-core/src/pipeline.rs b/third_party/rust/wgpu-core/src/pipeline.rs index db1c1ba76a850..08e7167db6cca 100644 --- a/third_party/rust/wgpu-core/src/pipeline.rs +++ b/third_party/rust/wgpu-core/src/pipeline.rs @@ -4,7 +4,7 @@ use crate::{ command::ColorAttachmentError, device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext}, id::{PipelineCacheId, PipelineLayoutId, ShaderModuleId}, - resource::{Labeled, TrackingData}, + resource::{InvalidResourceError, Labeled, TrackingData}, resource_log, validation, Label, }; use arrayvec::ArrayVec; @@ -222,10 +222,6 @@ pub struct ResolvedComputePipelineDescriptor<'a> { pub enum CreateComputePipelineError { #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Cache is invalid")] - InvalidCache, #[error("Unable to 
derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Error matching shader requirements against the pipeline")] @@ -236,6 +232,8 @@ pub enum CreateComputePipelineError { PipelineConstants(String), #[error(transparent)] MissingDownlevelFlags(#[from] MissingDownlevelFlags), + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Debug)] @@ -286,16 +284,6 @@ pub enum CreatePipelineCacheError { Internal(String), } -impl From for CreatePipelineCacheError { - fn from(value: hal::PipelineCacheError) -> Self { - match value { - hal::PipelineCacheError::Device(device) => { - CreatePipelineCacheError::Device(device.into()) - } - } - } -} - #[derive(Debug)] pub struct PipelineCache { pub(crate) raw: ManuallyDrop>, @@ -477,10 +465,6 @@ pub enum CreateRenderPipelineError { ColorAttachment(#[from] ColorAttachmentError), #[error(transparent)] Device(#[from] DeviceError), - #[error("Pipeline layout is invalid")] - InvalidLayout, - #[error("Pipeline cache is invalid")] - InvalidCache, #[error("Unable to derive an implicit layout")] Implicit(#[from] ImplicitLayoutError), #[error("Color state [{0}] is invalid")] @@ -550,6 +534,8 @@ pub enum CreateRenderPipelineError { "but no render target for the pipeline was specified." ))] NoTargetSpecified, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } bitflags::bitflags! 
{ diff --git a/third_party/rust/wgpu-core/src/present.rs b/third_party/rust/wgpu-core/src/present.rs index 697156b35f79a..93297bf3f6b53 100644 --- a/third_party/rust/wgpu-core/src/present.rs +++ b/third_party/rust/wgpu-core/src/present.rs @@ -122,10 +122,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let (device, config) = if let Some(ref present) = *surface.presentation.lock() { present.device.check_is_valid()?; @@ -179,7 +176,7 @@ impl Global { let clear_view_desc = hal::TextureViewDescriptor { label: hal_label( Some("(wgpu internal) clear surface texture view"), - self.instance.flags, + device.instance_flags, ), format: config.format, dimension: wgt::TextureViewDimension::D2, @@ -191,7 +188,7 @@ impl Global { .raw() .create_texture_view(ast.texture.as_ref().borrow(), &clear_view_desc) } - .map_err(DeviceError::from)?; + .map_err(|e| device.handle_hal_error(e))?; let mut presentation = surface.presentation.lock(); let present = presentation.as_mut().unwrap(); @@ -218,7 +215,7 @@ impl Global { .textures .insert_single(&texture, hal::TextureUses::UNINITIALIZED); - let id = fid.assign(texture); + let id = fid.assign(resource::Fallible::Valid(texture)); if present.acquired_texture.is_some() { return Err(SurfaceError::AlreadyAcquired); @@ -238,7 +235,7 @@ impl Global { match err { hal::SurfaceError::Lost => Status::Lost, hal::SurfaceError::Device(err) => { - return Err(DeviceError::from(err).into()); + return Err(device.handle_hal_error(err).into()); } hal::SurfaceError::Outdated => Status::Outdated, hal::SurfaceError::Other(msg) => { @@ -257,10 +254,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { @@ -286,8 +280,8 
@@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. - let texture = hub.textures.unregister(texture_id); - if let Some(texture) = texture { + let texture = hub.textures.remove(texture_id).get(); + if let Ok(texture) = texture { device .trackers .lock() @@ -315,7 +309,9 @@ impl Global { Ok(()) => Ok(Status::Good), Err(err) => match err { hal::SurfaceError::Lost => Ok(Status::Lost), - hal::SurfaceError::Device(err) => Err(SurfaceError::from(DeviceError::from(err))), + hal::SurfaceError::Device(err) => { + Err(SurfaceError::from(device.handle_hal_error(err))) + } hal::SurfaceError::Outdated => Ok(Status::Outdated), hal::SurfaceError::Other(msg) => { log::error!("acquire error: {}", msg); @@ -330,10 +326,7 @@ impl Global { let hub = &self.hub; - let surface = self - .surfaces - .get(surface_id) - .map_err(|_| SurfaceError::Invalid)?; + let surface = self.surfaces.get(surface_id); let mut presentation = surface.presentation.lock(); let present = match presentation.as_mut() { Some(present) => present, @@ -357,9 +350,9 @@ impl Global { // The texture ID got added to the device tracker by `submit()`, // and now we are moving it away. 
- let texture = hub.textures.unregister(texture_id); + let texture = hub.textures.remove(texture_id).get(); - if let Some(texture) = texture { + if let Ok(texture) = texture { device .trackers .lock() diff --git a/third_party/rust/wgpu-core/src/registry.rs b/third_party/rust/wgpu-core/src/registry.rs index 84f073d086913..6524a4df99b34 100644 --- a/third_party/rust/wgpu-core/src/registry.rs +++ b/third_party/rust/wgpu-core/src/registry.rs @@ -4,7 +4,7 @@ use crate::{ id::Id, identity::IdentityManager, lock::{rank, RwLock, RwLockReadGuard, RwLockWriteGuard}, - storage::{Element, InvalidId, Storage, StorageItem}, + storage::{Element, Storage, StorageItem}, }; #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] @@ -12,7 +12,6 @@ pub struct RegistryReport { pub num_allocated: usize, pub num_kept_from_user: usize, pub num_released_from_user: usize, - pub num_error: usize, pub element_size: usize, } @@ -56,28 +55,18 @@ pub(crate) struct FutureId<'a, T: StorageItem> { } impl FutureId<'_, T> { - #[allow(dead_code)] pub fn id(&self) -> Id { self.id } - pub fn into_id(self) -> Id { - self.id - } - /// Assign a new resource to this ID. /// /// Registers it with the registry. 
- pub fn assign(self, value: Arc) -> Id { + pub fn assign(self, value: T) -> Id { let mut data = self.data.write(); data.insert(self.id, value); self.id } - - pub fn assign_error(self) -> Id { - self.data.write().insert_error(self.id); - self.id - } } impl Registry { @@ -98,9 +87,6 @@ impl Registry { } } - pub(crate) fn get(&self, id: Id) -> Result, InvalidId> { - self.read().get_owned(id) - } #[track_caller] pub(crate) fn read<'a>(&'a self) -> RwLockReadGuard<'a, Storage> { self.storage.read() @@ -109,12 +95,7 @@ impl Registry { pub(crate) fn write<'a>(&'a self) -> RwLockWriteGuard<'a, Storage> { self.storage.write() } - pub(crate) fn force_replace_with_error(&self, id: Id) { - let mut storage = self.storage.write(); - storage.remove(id); - storage.insert_error(id); - } - pub(crate) fn unregister(&self, id: Id) -> Option> { + pub(crate) fn remove(&self, id: Id) -> T { let value = self.storage.write().remove(id); // This needs to happen *after* removing it from the storage, to maintain the // invariant that `self.identity` only contains ids which are actually available @@ -135,13 +116,18 @@ impl Registry { match *element { Element::Occupied(..) 
=> report.num_kept_from_user += 1, Element::Vacant => report.num_released_from_user += 1, - Element::Error(_) => report.num_error += 1, } } report } } +impl Registry { + pub(crate) fn get(&self, id: Id) -> T { + self.read().get(id) + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -170,7 +156,7 @@ mod tests { let value = Arc::new(TestData); let new_id = registry.prepare(wgt::Backend::Empty, None); let id = new_id.assign(value); - registry.unregister(id); + registry.remove(id); } }); } diff --git a/third_party/rust/wgpu-core/src/resource.rs b/third_party/rust/wgpu-core/src/resource.rs index 184851fc2ad14..5df285da5426e 100644 --- a/third_party/rust/wgpu-core/src/resource.rs +++ b/third_party/rust/wgpu-core/src/resource.rs @@ -106,8 +106,8 @@ pub(crate) trait ParentDevice: Labeled { } } - fn same_device(&self, device: &Arc) -> Result<(), DeviceError> { - if Arc::ptr_eq(self.device(), device) { + fn same_device(&self, device: &Device) -> Result<(), DeviceError> { + if std::ptr::eq(&**self.device(), device) { Ok(()) } else { Err(DeviceError::DeviceMismatch(Box::new(DeviceMismatch { @@ -305,7 +305,7 @@ impl BufferMapCallback { let status = match result { Ok(()) => BufferMapAsyncStatus::Success, Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost, - Err(BufferAccessError::InvalidBufferId(_)) + Err(BufferAccessError::InvalidResource(_)) | Err(BufferAccessError::DestroyedResource(_)) => BufferMapAsyncStatus::Invalid, Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped, Err(BufferAccessError::MapAlreadyPending) => { @@ -324,7 +324,9 @@ impl BufferMapCallback { | Err(BufferAccessError::NegativeRange { .. 
}) => { BufferMapAsyncStatus::InvalidRange } - Err(_) => BufferMapAsyncStatus::Error, + Err(BufferAccessError::Failed) + | Err(BufferAccessError::NotMapped) + | Err(BufferAccessError::MapAborted) => BufferMapAsyncStatus::Error, }; (inner.callback)(status, inner.user_data); @@ -347,8 +349,6 @@ pub enum BufferAccessError { Device(#[from] DeviceError), #[error("Buffer map failed")] Failed, - #[error("BufferId {0:?} is invalid")] - InvalidBufferId(BufferId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), #[error("Buffer is already mapped")] @@ -386,6 +386,8 @@ pub enum BufferAccessError { }, #[error("Buffer map aborted")] MapAborted, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -410,6 +412,45 @@ pub struct MissingTextureUsageError { #[error("{0} has been destroyed")] pub struct DestroyedResourceError(pub ResourceErrorIdent); +#[derive(Clone, Debug, Error)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[error("{0} is invalid")] +pub struct InvalidResourceError(pub ResourceErrorIdent); + +pub(crate) enum Fallible { + Valid(Arc), + Invalid(Arc), +} + +impl Fallible { + pub fn get(self) -> Result, InvalidResourceError> { + match self { + Fallible::Valid(v) => Ok(v), + Fallible::Invalid(label) => Err(InvalidResourceError(ResourceErrorIdent { + r#type: Cow::Borrowed(T::TYPE), + label: (*label).clone(), + })), + } + } +} + +impl Clone for Fallible { + fn clone(&self) -> Self { + match self { + Self::Valid(v) => Self::Valid(v.clone()), + Self::Invalid(l) => Self::Invalid(l.clone()), + } + } +} + +impl ResourceType for Fallible { + const TYPE: &'static str = T::TYPE; +} + +impl crate::storage::StorageItem for Fallible { + type Marker = T::Marker; +} + pub type BufferAccessResult = Result<(), BufferAccessError>; #[derive(Debug)] @@ -832,8 +873,10 @@ impl StagingBuffer { memory_flags: hal::MemoryFlags::TRANSIENT, }; - let raw = unsafe { 
device.raw().create_buffer(&stage_desc)? }; - let mapping = unsafe { device.raw().map_buffer(raw.as_ref(), 0..size.get()) }?; + let raw = unsafe { device.raw().create_buffer(&stage_desc) } + .map_err(|e| device.handle_hal_error(e))?; + let mapping = unsafe { device.raw().map_buffer(raw.as_ref(), 0..size.get()) } + .map_err(|e| device.handle_hal_error(e))?; let staging_buffer = StagingBuffer { raw, @@ -1014,7 +1057,7 @@ impl Texture { if init { TextureInitTracker::new(desc.mip_level_count, desc.array_layer_count()) } else { - TextureInitTracker::new(0, 0) + TextureInitTracker::new(desc.mip_level_count, 0) }, ), full_range: TextureSelector { @@ -1200,7 +1243,7 @@ impl Global { let hub = &self.hub; - if let Ok(buffer) = hub.buffers.get(id) { + if let Ok(buffer) = hub.buffers.get(id).get() { let snatch_guard = buffer.device.snatchable_lock.read(); let hal_buffer = buffer .raw(&snatch_guard) @@ -1223,7 +1266,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture) = hub.textures.get(id) { + if let Ok(texture) = hub.textures.get(id).get() { let snatch_guard = texture.device.snatchable_lock.read(); let hal_texture = texture.raw(&snatch_guard); let hal_texture = hal_texture @@ -1247,7 +1290,7 @@ impl Global { let hub = &self.hub; - if let Ok(texture_view) = hub.texture_views.get(id) { + if let Ok(texture_view) = hub.texture_views.get(id).get() { let snatch_guard = texture_view.device.snatchable_lock.read(); let hal_texture_view = texture_view.raw(&snatch_guard); let hal_texture_view = hal_texture_view @@ -1270,11 +1313,8 @@ impl Global { profiling::scope!("Adapter::as_hal"); let hub = &self.hub; - let adapter = hub.adapters.get(id).ok(); - let hal_adapter = adapter - .as_ref() - .map(|adapter| &adapter.raw.adapter) - .and_then(|adapter| adapter.as_any().downcast_ref()); + let adapter = hub.adapters.get(id); + let hal_adapter = adapter.raw.adapter.as_any().downcast_ref(); hal_adapter_callback(hal_adapter) } @@ -1289,12 +1329,8 @@ impl Global { ) -> R { 
profiling::scope!("Device::as_hal"); - let hub = &self.hub; - let device = hub.devices.get(id).ok(); - let hal_device = device - .as_ref() - .map(|device| device.raw()) - .and_then(|device| device.as_any().downcast_ref()); + let device = self.hub.devices.get(id); + let hal_device = device.raw().as_any().downcast_ref(); hal_device_callback(hal_device) } @@ -1309,14 +1345,9 @@ impl Global { ) -> R { profiling::scope!("Device::fence_as_hal"); - let hub = &self.hub; - - if let Ok(device) = hub.devices.get(id) { - let fence = device.fence.read(); - hal_fence_callback(fence.as_any().downcast_ref()) - } else { - hal_fence_callback(None) - } + let device = self.hub.devices.get(id); + let fence = device.fence.read(); + hal_fence_callback(fence.as_any().downcast_ref()) } /// # Safety @@ -1328,10 +1359,9 @@ impl Global { ) -> R { profiling::scope!("Surface::as_hal"); - let surface = self.surfaces.get(id).ok(); + let surface = self.surfaces.get(id); let hal_surface = surface - .as_ref() - .and_then(|surface| surface.raw(A::VARIANT)) + .raw(A::VARIANT) .and_then(|surface| surface.as_any().downcast_ref()); hal_surface_callback(hal_surface) @@ -1353,12 +1383,13 @@ impl Global { let hub = &self.hub; - if let Ok(cmd_buf) = hub.command_buffers.get(id.into_command_buffer_id()) { - let mut cmd_buf_data = cmd_buf.data.lock(); - let cmd_buf_data = cmd_buf_data.as_mut().unwrap(); + let cmd_buf = hub.command_buffers.get(id.into_command_buffer_id()); + let cmd_buf_data = cmd_buf.try_get(); + + if let Ok(mut cmd_buf_data) = cmd_buf_data { let cmd_buf_raw = cmd_buf_data .encoder - .open() + .open(&cmd_buf.device) .ok() .and_then(|encoder| encoder.as_any_mut().downcast_mut()); hal_command_encoder_callback(cmd_buf_raw) @@ -1616,8 +1647,6 @@ impl TextureView { pub enum CreateTextureViewError { #[error(transparent)] Device(#[from] DeviceError), - #[error("TextureId {0:?} is invalid")] - InvalidTextureId(TextureId), #[error(transparent)] DestroyedResource(#[from] DestroyedResourceError), 
#[error("Not enough memory left to create texture view")] @@ -1660,6 +1689,8 @@ pub enum CreateTextureViewError { texture: wgt::TextureFormat, view: wgt::TextureFormat, }, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } #[derive(Clone, Debug, Error)] @@ -1832,8 +1863,8 @@ impl QuerySet { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum DestroyError { - #[error("Resource is invalid")] - Invalid, #[error("Resource is already destroyed")] AlreadyDestroyed, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } diff --git a/third_party/rust/wgpu-core/src/storage.rs b/third_party/rust/wgpu-core/src/storage.rs index c5e91eedd444c..5a57782bf74e2 100644 --- a/third_party/rust/wgpu-core/src/storage.rs +++ b/third_party/rust/wgpu-core/src/storage.rs @@ -8,26 +8,30 @@ use crate::{Epoch, Index}; /// An entry in a `Storage::map` table. #[derive(Debug)] -pub(crate) enum Element { +pub(crate) enum Element +where + T: StorageItem, +{ /// There are no live ids with this index. Vacant, /// There is one live id with this index, allocated at the given /// epoch. - Occupied(Arc, Epoch), - - /// Like `Occupied`, but an error occurred when creating the - /// resource. - Error(Epoch), + Occupied(T, Epoch), } -#[derive(Clone, Debug)] -pub(crate) struct InvalidId; - pub(crate) trait StorageItem: ResourceType { type Marker: Marker; } +impl ResourceType for Arc { + const TYPE: &'static str = T::TYPE; +} + +impl StorageItem for Arc { + type Marker = T::Marker; +} + #[macro_export] macro_rules! impl_storage_item { ($ty:ident) => { @@ -70,34 +74,13 @@ impl Storage where T: StorageItem, { - /// Get a reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. 
- pub(crate) fn get(&self, id: Id) -> Result<&Arc, InvalidId> { + pub(crate) fn insert(&mut self, id: Id, value: T) { let (index, epoch, _) = id.unzip(); - let (result, storage_epoch) = match self.map.get(index as usize) { - Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch), - None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), - Some(&Element::Error(epoch)) => (Err(InvalidId), epoch), - }; - assert_eq!( - epoch, storage_epoch, - "{}[{:?}] is no longer alive", - self.kind, id - ); - result - } - - /// Get an owned reference to an item behind a potentially invalid ID. - /// Panics if there is an epoch mismatch, or the entry is empty. - pub(crate) fn get_owned(&self, id: Id) -> Result, InvalidId> { - Ok(Arc::clone(self.get(id)?)) - } - - fn insert_impl(&mut self, index: usize, epoch: Epoch, element: Element) { + let index = index as usize; if index >= self.map.len() { self.map.resize_with(index + 1, || Element::Vacant); } - match std::mem::replace(&mut self.map[index], element) { + match std::mem::replace(&mut self.map[index], Element::Occupied(value, epoch)) { Element::Vacant => {} Element::Occupied(_, storage_epoch) => { assert_ne!( @@ -107,52 +90,21 @@ where T::TYPE ); } - Element::Error(storage_epoch) => { - assert_ne!( - epoch, - storage_epoch, - "Index {index:?} of {} is already occupied with Error", - T::TYPE - ); - } } } - pub(crate) fn insert(&mut self, id: Id, value: Arc) { - let (index, epoch, _backend) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Occupied(value, epoch)) - } - - pub(crate) fn insert_error(&mut self, id: Id) { - let (index, epoch, _) = id.unzip(); - self.insert_impl(index as usize, epoch, Element::Error(epoch)) - } - - pub(crate) fn replace_with_error(&mut self, id: Id) -> Result, InvalidId> { - let (index, epoch, _) = id.unzip(); - match std::mem::replace(&mut self.map[index as usize], Element::Error(epoch)) { - Element::Vacant => panic!("Cannot access vacant resource"), - 
Element::Occupied(value, storage_epoch) => { - assert_eq!(epoch, storage_epoch); - Ok(value) - } - _ => Err(InvalidId), - } - } - - pub(crate) fn remove(&mut self, id: Id) -> Option> { + pub(crate) fn remove(&mut self, id: Id) -> T { let (index, epoch, _) = id.unzip(); match std::mem::replace(&mut self.map[index as usize], Element::Vacant) { Element::Occupied(value, storage_epoch) => { assert_eq!(epoch, storage_epoch); - Some(value) + value } - Element::Error(_) => None, Element::Vacant => panic!("Cannot remove a vacant resource"), } } - pub(crate) fn iter(&self, backend: Backend) -> impl Iterator, &Arc)> { + pub(crate) fn iter(&self, backend: Backend) -> impl Iterator, &T)> { self.map .iter() .enumerate() @@ -168,3 +120,24 @@ where self.map.len() } } + +impl Storage +where + T: StorageItem + Clone, +{ + /// Get an owned reference to an item. + /// Panics if there is an epoch mismatch, the entry is empty or in error. + pub(crate) fn get(&self, id: Id) -> T { + let (index, epoch, _) = id.unzip(); + let (result, storage_epoch) = match self.map.get(index as usize) { + Some(&Element::Occupied(ref v, epoch)) => (v.clone(), epoch), + None | Some(&Element::Vacant) => panic!("{}[{:?}] does not exist", self.kind, id), + }; + assert_eq!( + epoch, storage_epoch, + "{}[{:?}] is no longer alive", + self.kind, id + ); + result + } +} diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs index 1c74bffd97648..60c22389615b8 100644 --- a/third_party/rust/wgpu-core/src/track/texture.rs +++ b/third_party/rust/wgpu-core/src/track/texture.rs @@ -442,10 +442,9 @@ impl TextureTracker { let transitions = self .temp .drain(..) 
- .map(|pending| { + .inspect(|pending| { let tex = unsafe { self.metadata.get_resource_unchecked(pending.id as _) }; textures.push(tex.inner.get(snatch_guard)); - pending }) .collect(); (transitions, textures) diff --git a/third_party/rust/wgpu-core/src/validation.rs b/third_party/rust/wgpu-core/src/validation.rs index ea2608d755c37..c1cbdaf183cda 100644 --- a/third_party/rust/wgpu-core/src/validation.rs +++ b/third_party/rust/wgpu-core/src/validation.rs @@ -1,4 +1,4 @@ -use crate::{device::bgl, FastHashMap, FastHashSet}; +use crate::{device::bgl, resource::InvalidResourceError, FastHashMap, FastHashSet}; use arrayvec::ArrayVec; use std::{collections::hash_map::Entry, fmt}; use thiserror::Error; @@ -200,8 +200,6 @@ pub enum InputError { #[derive(Clone, Debug, Error)] #[non_exhaustive] pub enum StageError { - #[error("Shader module is invalid")] - InvalidModule, #[error( "Shader entry point's workgroup size {current:?} ({current_total} total invocations) must be less or equal to the per-dimension limit {limit:?} and the total invocation limit {total}" )] @@ -241,6 +239,8 @@ pub enum StageError { but no entry point was specified" )] MultipleEntryPointsFound, + #[error(transparent)] + InvalidResource(#[from] InvalidResourceError), } fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option { @@ -275,7 +275,7 @@ fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option Sf::Rgb10a2Uint, Tf::Rgb10a2Unorm => Sf::Rgb10a2Unorm, - Tf::Rg11b10UFloat => Sf::Rg11b10UFloat, + Tf::Rg11b10Ufloat => Sf::Rg11b10Ufloat, Tf::Rg32Uint => Sf::Rg32Uint, Tf::Rg32Sint => Sf::Rg32Sint, @@ -331,7 +331,7 @@ fn map_storage_format_from_naga(format: naga::StorageFormat) -> wgt::TextureForm Sf::Rgb10a2Uint => Tf::Rgb10a2Uint, Sf::Rgb10a2Unorm => Tf::Rgb10a2Unorm, - Sf::Rg11b10UFloat => Tf::Rg11b10UFloat, + Sf::Rg11b10Ufloat => Tf::Rg11b10Ufloat, Sf::Rg32Uint => Tf::Rg32Uint, Sf::Rg32Sint => Tf::Rg32Sint, @@ -658,7 +658,7 @@ impl NumericType { Tf::Rgba8Sint | Tf::Rgba16Sint | 
Tf::Rgba32Sint => { (NumericDimension::Vector(Vs::Quad), Scalar::I32) } - Tf::Rg11b10UFloat => (NumericDimension::Vector(Vs::Tri), Scalar::F32), + Tf::Rg11b10Ufloat => (NumericDimension::Vector(Vs::Tri), Scalar::F32), Tf::Stencil8 | Tf::Depth16Unorm | Tf::Depth32Float diff --git a/third_party/rust/wgpu-hal/.cargo-checksum.json b/third_party/rust/wgpu-hal/.cargo-checksum.json index ba3d4c38e4089..45dbf61227e86 100644 --- a/third_party/rust/wgpu-hal/.cargo-checksum.json +++ b/third_party/rust/wgpu-hal/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"a14637e02cba65726d8865779d32de59357686820e735b5ab7867cc4243cd281","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"8930213345c3edb1273533f22d2c44bd8a7c6c4fe2510d71852a7e2d27116c6e","build.rs":"c80bdc0152a00471eec6ed0dd0f7d55d0b975498a00ba05e94100c84ad639a49","examples/halmark/main.rs":"e3b3a94ec643f13d0047786a1d4e8d03d076d6df28f987d36f31e6df4533cffb","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"faefa9b8dda46cc83eea26d4f0eade4790c2922569e0d2beab06d340b61886c6","examples/ray-traced-triangle/main.rs":"53cc5515b94f8cf57b0c709d69328f1d10112963b70ddb26799d2c858bcb1635","examples/ray-traced-triangle/shader.wgsl":"cc10caf92746724a71f6dd0dbc3a71e57b37c7d1d83278556805a535c0728a9d","src/auxil/dxgi/conv.rs":"06dae4417e804d7a15421e116df9aa6cf05d0471bec77a5f881e0f7dd7c5b731","src/auxil/dxgi/exception.rs":"fd9ca0a9bdfe028ee9d41d149f91351de7563c23864757fd4e6f3e046192f65f","src/auxil/dxgi/factory.rs":"eaddd1a85f803367d4a2729f5775f96d672a2f7675b6ee6ad1d4b2ccd3c1989a","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"773b1afddb262ec416b6166506eb748a33bfdaee4bcad1f95839a
c2858e73b06","src/auxil/dxgi/time.rs":"9b0fa8af0f7d284c7f18e37d1d15749230dca87b39bc4c205a0c4d358fb16336","src/auxil/mod.rs":"b07c43813541136786f06b8a0c11a8b89157932dde14c0f7a4ead1ba1ee401cc","src/auxil/renderdoc.rs":"be2a012b7f326173cdeeccc0925bc26c64e3439c974338f5d5836f20afcf0022","src/dx12/adapter.rs":"cf1c3e6dca9244fc74b5cc02f9c1e3db25029a93884e2b28d57befcd1699dca2","src/dx12/command.rs":"36ce2fb1edc56f4a10cf9f18dcb357ad03a2b3be878108a447efcac3432a47c4","src/dx12/conv.rs":"14d74445762d000f88067670118a4a259e37e94002904879fbdc56a965236955","src/dx12/descriptor.rs":"a32fe353ec0ac35e55499de437d58599f88675f118f333c201978dacd70380be","src/dx12/device.rs":"b9bceff605f8c2a2c97cae2c40ca0d58c804aae2aaf3995cd0f5f1778930dd59","src/dx12/instance.rs":"92441550e669d8a954c26de8d818f99a0b907297454efa4667f80d0bc4b13051","src/dx12/mod.rs":"6a1b3d1b8778ed933badcf7283f60174669176eab8f5eb7551f935a1bcf52a6b","src/dx12/shader_compilation.rs":"6ddb7ea2d5da783a49642463815de5426cfb18107379cb898834a51413b9758f","src/dx12/suballocation.rs":"b2b0ad99abcd429a61dfb7b68b33d81520b562d3ce1c5150f3dc95b6ec8a5752","src/dx12/types.rs":"3fc7619fc09303eb3c936d4ded6889f94ce9e8b9aa62742ce900baa1b1e1cca7","src/dx12/view.rs":"1b10cee262487ca0466d07c4f994ff6b1ff95178bd037d1c8a1cf1a74cea0581","src/dynamic/adapter.rs":"ea69fd063e4f0492e7927bcca64501640daa53af9b2d81b8948ac7c5836d0910","src/dynamic/command.rs":"ba32bd046f0cd52f7a041bbd51254aedbd21b39db4bc920f96a9adfe7fda0466","src/dynamic/device.rs":"0d30b790224a6d5884f0089303b64546e317b0d0d7c7327aaa9582ed08297889","src/dynamic/instance.rs":"6c1f3b325a8059ae4717e016d03d3c1f6074c98422f787f776aeb6d46a828c26","src/dynamic/mod.rs":"c8f25c902c35306a15cdbe47d8c3877968d9ca6586c5b2c3db1f9e45d7f536fb","src/dynamic/queue.rs":"23367e59dbf97cbaf5363cc68c00b0d2096a296f79ec5f636479f2132ab9a39c","src/dynamic/surface.rs":"c07327f244687c4a700390d3a8f0a443e4fff2a9192bf026962687bad945ccd2","src/empty.rs":"070521690d8846a02859727df817267027ae00bd98a8eb3820620a47dff551c8","src/gles/
adapter.rs":"890cd5a93a9dfbff073e1a16c90cc181cfb60beab123787abb224e4d1f86e83f","src/gles/command.rs":"470347bb905f702dd3b9e6cf80f1e5bf0ee53b069f4b347bb4d49216c548d901","src/gles/conv.rs":"9b82ee6b549a4db25a20a9337d30ff9a78443acdb87b820a77f1ad72ab32b3ec","src/gles/device.rs":"322cd233f87e11d314124078610126f7730e54f5fa4d3e58c76828b6eab8d190","src/gles/egl.rs":"eaa90401ecf9027d837c53a6420891c6b3ea1c54d4d7a6b2924a64f753792fd3","src/gles/emscripten.rs":"d225171b044a949bb4ebc295d975f73cca85440e8f892605bf5ef42373e6772d","src/gles/mod.rs":"ce951bd0030a0a4e42e1fac233618fb59410a3bd70935ceca0136774fe903473","src/gles/queue.rs":"36fc2318e37f6eb5e98b01f12c9903f2877075301ddc1f3f75d8c76c839541b4","src/gles/shaders/clear.frag":"9133ed8ed97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"2c7bd33d66bc41f9fc63226fe92b995b2d9dbfef7ffa4c7930d5133182b5aa87","src/gles/wgl.rs":"7031dd0d6668f4f7e31c46a55afdb69ebc183b3b9ad88218064548555522222e","src/lib.rs":"52e1bd4674c11c1af858628ed3d5e45689e99d47d76347347ccd2e5782d6ce6f","src/metal/adapter.rs":"cdaa1af796641281482981b33d73fde06d97a671b96c3560cf40883709ecbe8d","src/metal/command.rs":"a09cabb0a4aac05ef853d17c4979cf4144f83ebfd153d4f548469f0c79494a17","src/metal/conv.rs":"a34a44e6affb594f3285f8d585b0fb33132aa7dc6a612f7c1aecaa400fe43265","src/metal/device.rs":"2e921cfcb636977273e219b3d0fb6574eceb58471f3bb3327bca986567902cbf","src/metal/mod.rs":"05b9a3bf2e0dc065da880c7ca718d86fdbc416e292e066cc97bfd6c0314ece2a","src/metal/surface.rs":"a2332b8577411a79ecb6aaf1e2089b5b4313e2724c70df49227d11e649e4a721","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"d9870072838a1
a221d47223e2894910e626ebc4d0e6c08f4d9197d354dd8ba05","src/vulkan/command.rs":"133192c9d113a5f3bcdddebbf43a91c4204353632045255939e6e3b1b46d1ff5","src/vulkan/conv.rs":"086bd08d19dc6829bdd926e5b495278e721c432b7b7475a50f09281812f5e5d6","src/vulkan/device.rs":"2534fe30ba4da9418a7cf64b4ad01f751ef132b35497cb187254104d2aea5721","src/vulkan/instance.rs":"034f51659ae3caf85b6cc2249165900763ae7dcea128c1d62caca0a0f1cacee5","src/vulkan/mod.rs":"168cc2acb8fecfb9dc7932c7104e4c529bdc9cd8bd02f8328fdd69690c67c6dd"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"e9e030a4092a85fa9d7fe9f47ce34f66855b0479bf51a00839a3a49969dbc505","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"8930213345c3edb1273533f22d2c44bd8a7c6c4fe2510d71852a7e2d27116c6e","build.rs":"c80bdc0152a00471eec6ed0dd0f7d55d0b975498a00ba05e94100c84ad639a49","examples/halmark/main.rs":"251a067ed7e8cfe8850b5dc1789b5ae3e35ddde2540196fa649337eee8ede022","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"efa99462c04f4728c5db738af4901bfd238405a95fc815b5cc95c7d2343c62bf","examples/ray-traced-triangle/main.rs":"ab0715ce6e720682fa2ab4df018bd3e7250bae3f6cbc7d7474c59702ce0f849a","examples/ray-traced-triangle/shader.wgsl":"cc10caf92746724a71f6dd0dbc3a71e57b37c7d1d83278556805a535c0728a9d","src/auxil/dxgi/conv.rs":"0c68c8890d1a4e397f5f8465af3612b90181e4ebc4ebdfefbd40a4f845993bc2","src/auxil/dxgi/exception.rs":"fd9ca0a9bdfe028ee9d41d149f91351de7563c23864757fd4e6f3e046192f65f","src/auxil/dxgi/factory.rs":"d6dd86ce5a1187307bf128550cf26d7510d2839979b6e20f70348a69353ac27f","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"a14b8b0dd052e7dde11220043f180d2e8ce7ae522dea6
e96536c82db13fc7abc","src/auxil/dxgi/time.rs":"6235cc071b8535b4b58f843dbaf4ff4564a37e80a99c452c4a8988e436192483","src/auxil/mod.rs":"b07c43813541136786f06b8a0c11a8b89157932dde14c0f7a4ead1ba1ee401cc","src/auxil/renderdoc.rs":"be2a012b7f326173cdeeccc0925bc26c64e3439c974338f5d5836f20afcf0022","src/dx12/adapter.rs":"16cb1bfe2666f56e269db34f25545649643a4d768b7451796bc3842167ad5e7b","src/dx12/command.rs":"80bcfde3deabf829ca1d192f21f5c4ae870e66e2f13c40cd6f84502819440ba5","src/dx12/conv.rs":"14d74445762d000f88067670118a4a259e37e94002904879fbdc56a965236955","src/dx12/descriptor.rs":"46e47cb6ca2b50631872aebdecb9beb6c43c205f8e04f89f867c4830eb8139f6","src/dx12/device.rs":"caf6ce9c1c62b4916768711435d1e811a6d74680803cf5ea3874acf1819e51ab","src/dx12/instance.rs":"4e3fceedfeda69889b9451054d6772dc19b4ad66a4d793a57004308265fd9c1b","src/dx12/mod.rs":"7b342c0d241f6e7ebce2bc0fd1b99ff2ad69a65f3708afeafd8e7ef2267c6c95","src/dx12/shader_compilation.rs":"d96c54d732993445b05aeaaaae3d8b8ee3d3a5b4f7551d5bd4169e55ad300f48","src/dx12/suballocation.rs":"f71bb7ee28cb3396608db86d0564f3a4dfe3a662ee6c2a9bb518dd5d8a78fb79","src/dx12/types.rs":"3fc7619fc09303eb3c936d4ded6889f94ce9e8b9aa62742ce900baa1b1e1cca7","src/dx12/view.rs":"1b10cee262487ca0466d07c4f994ff6b1ff95178bd037d1c8a1cf1a74cea0581","src/dynamic/adapter.rs":"ea69fd063e4f0492e7927bcca64501640daa53af9b2d81b8948ac7c5836d0910","src/dynamic/command.rs":"a6d7c8e05ca783a618a51f9ff25af7cbad0cc4f5b4486da8c39eb21458cd7588","src/dynamic/device.rs":"3023e8e69ed712989f321eb705ffae5acfa764ceb62ed8703cd89a856c796728","src/dynamic/instance.rs":"6c1f3b325a8059ae4717e016d03d3c1f6074c98422f787f776aeb6d46a828c26","src/dynamic/mod.rs":"c8f25c902c35306a15cdbe47d8c3877968d9ca6586c5b2c3db1f9e45d7f536fb","src/dynamic/queue.rs":"23367e59dbf97cbaf5363cc68c00b0d2096a296f79ec5f636479f2132ab9a39c","src/dynamic/surface.rs":"c07327f244687c4a700390d3a8f0a443e4fff2a9192bf026962687bad945ccd2","src/empty.rs":"e6681210a0352cb7eb9e85149b2b6e5d1d875a316ca480ab31dcfe05570b0d6b","s
rc/gles/adapter.rs":"cabe4ea115a9dd26f28117b6256a826f133c12b648eb23cea741c59bf181dfd1","src/gles/command.rs":"470347bb905f702dd3b9e6cf80f1e5bf0ee53b069f4b347bb4d49216c548d901","src/gles/conv.rs":"addf1deae35a6d5d38c49a95932e2fbd5c51caa87299a1eb19a145576977353c","src/gles/device.rs":"48b61c769254bcbe0d6ecc808a21129e06bd654be62168308a0c2469ec1240c5","src/gles/egl.rs":"601f6986c3a1fb63a93badb78aa7463a36a19f90c31831f761bc96b9f8a9b342","src/gles/emscripten.rs":"d225171b044a949bb4ebc295d975f73cca85440e8f892605bf5ef42373e6772d","src/gles/mod.rs":"ce951bd0030a0a4e42e1fac233618fb59410a3bd70935ceca0136774fe903473","src/gles/queue.rs":"36fc2318e37f6eb5e98b01f12c9903f2877075301ddc1f3f75d8c76c839541b4","src/gles/shaders/clear.frag":"9133ed8ed97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"2c7bd33d66bc41f9fc63226fe92b995b2d9dbfef7ffa4c7930d5133182b5aa87","src/gles/wgl.rs":"0d3ccbe0040646d9bd5cefd5b3769acd46b5a4f0cf5be627b008f8fe584958b3","src/lib.rs":"a21532b0f5e2e497c7c844285416d64096d3a7494fe9fad749bfe9ded30602ab","src/metal/adapter.rs":"fceb4ab5233411993da8c8eebbb04c9cea9605c02d8a4e0514d86d0852be33f0","src/metal/command.rs":"a09cabb0a4aac05ef853d17c4979cf4144f83ebfd153d4f548469f0c79494a17","src/metal/conv.rs":"a34a44e6affb594f3285f8d585b0fb33132aa7dc6a612f7c1aecaa400fe43265","src/metal/device.rs":"1c3da79d63cb4941bd6fbc50c8bedf5de043ff069e2a8b5a5803fb4af240bc39","src/metal/mod.rs":"b1d8e977a83c4603e86699c9874e804256ce04e17294aa0ff67fe80eca52a6d0","src/metal/surface.rs":"6fe7aa7e3d35b9dd77369345dc6034e1403a25f350661dd0cda5cb7f2a55263a","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"bcce8
f1926dda0191cfba64d2ef7c262cb608869268f0f44f141d14edd5ec134","src/vulkan/command.rs":"133192c9d113a5f3bcdddebbf43a91c4204353632045255939e6e3b1b46d1ff5","src/vulkan/conv.rs":"41b62e73124cdad43dd261e7a4f2e2c30e9d43d15e526d225ca523fc18284abd","src/vulkan/device.rs":"6ecfec3a3f19886fe9394c7a499978be24326a9f80631a75fcb6392b20a71417","src/vulkan/instance.rs":"7369639a708990c2327ea1a8d0f0785893f38b62619bc2441e35cea2ab1002a3","src/vulkan/mod.rs":"7097d9d306955b0ad44186489da663a159fce8b1a0410a2ec4af9df3037e2187"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-hal/Cargo.toml b/third_party/rust/wgpu-hal/Cargo.toml index cb776b7c3ece5..7d6d59979109b 100644 --- a/third_party/rust/wgpu-hal/Cargo.toml +++ b/third_party/rust/wgpu-hal/Cargo.toml @@ -136,6 +136,7 @@ dx12 = [ "gpu-allocator/d3d12", "naga/hlsl-out-if-target-windows", "windows/Win32_Graphics_Direct3D_Fxc", + "windows/Win32_Graphics_Direct3D_Dxc", "windows/Win32_Graphics_Direct3D", "windows/Win32_Graphics_Direct3D12", "windows/Win32_Graphics_DirectComposition", @@ -147,7 +148,6 @@ dx12 = [ "windows/Win32_System_Threading", "windows/Win32_UI_WindowsAndMessaging", ] -dxc_shader_compiler = ["dep:hassle-rs"] fragile-send-sync-non-atomic-wasm = ["wgt/fragile-send-sync-non-atomic-wasm"] gles = [ "naga/glsl-out", @@ -209,29 +209,31 @@ version = "0.29.0" [target.'cfg(any(target_os="macos", target_os="ios"))'.dependencies.objc] version = "0.2.5" -[target.'cfg(not(any(target_arch = "wasm32", windows, target_os = "ios")))'.dev-dependencies.glutin] +[target.'cfg(not(any(target_arch = "wasm32", target_os = "ios")))'.dev-dependencies.glutin] version = "0.31" features = [ "egl", + "wgl", "wayland", "x11", ] default-features = false -[target.'cfg(not(any(target_arch = "wasm32", windows, target_os = "ios")))'.dev-dependencies.glutin-winit] +[target.'cfg(not(any(target_arch = "wasm32", target_os = "ios")))'.dev-dependencies.glutin-winit] version = "0.4" features = [ "egl", + "wgl", "wayland", "x11", ] 
default-features = false -[target.'cfg(not(any(target_arch = "wasm32", windows, target_os = "ios")))'.dev-dependencies.rwh_05] +[target.'cfg(not(any(target_arch = "wasm32", target_os = "ios")))'.dev-dependencies.rwh_05] version = "0.5" package = "raw-window-handle" -[target.'cfg(not(any(target_arch = "wasm32", windows, target_os = "ios")))'.dev-dependencies.winit] +[target.'cfg(not(any(target_arch = "wasm32", target_os = "ios")))'.dev-dependencies.winit] version = "0.29" features = [ "android-native-activity", @@ -303,10 +305,6 @@ version = "0.27" optional = true default-features = false -[target."cfg(windows)".dependencies.hassle-rs] -version = "0.11.0" -optional = true - [target."cfg(windows)".dependencies.range-alloc] version = "0.1" optional = true diff --git a/third_party/rust/wgpu-hal/examples/halmark/main.rs b/third_party/rust/wgpu-hal/examples/halmark/main.rs index dabcea418a551..8ab7f1cb4709d 100644 --- a/third_party/rust/wgpu-hal/examples/halmark/main.rs +++ b/third_party/rust/wgpu-hal/examples/halmark/main.rs @@ -14,7 +14,9 @@ use winit::{ use std::{ borrow::{Borrow, Cow}, - iter, mem, ptr, + iter, + mem::size_of, + ptr, time::Instant, }; @@ -193,7 +195,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }, @@ -228,7 +230,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: true, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }], @@ -394,7 +396,7 @@ impl Example { let global_buffer_desc = hal::BufferDescriptor { label: Some("global"), - size: mem::size_of::() as wgt::BufferAddress, + size: size_of::() as wgt::BufferAddress, usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::UNIFORM, memory_flags: 
hal::MemoryFlags::PREFER_COHERENT, }; @@ -406,7 +408,7 @@ impl Example { ptr::copy_nonoverlapping( &globals as *const Globals as *const u8, mapping.ptr.as_ptr(), - mem::size_of::(), + size_of::(), ); device.unmap_buffer(&buffer); assert!(mapping.is_coherent); @@ -414,7 +416,7 @@ impl Example { }; let local_alignment = wgt::math::align_to( - mem::size_of::() as u32, + size_of::() as u32, capabilities.limits.min_uniform_buffer_offset_alignment, ); let local_buffer_desc = hal::BufferDescriptor { @@ -476,7 +478,7 @@ impl Example { let local_buffer_binding = hal::BufferBinding { buffer: &local_buffer, offset: 0, - size: wgt::BufferSize::new(mem::size_of::() as _), + size: wgt::BufferSize::new(size_of::() as _), }; let local_group_desc = hal::BindGroupDescriptor { label: Some("local"), diff --git a/third_party/rust/wgpu-hal/examples/raw-gles.rs b/third_party/rust/wgpu-hal/examples/raw-gles.rs index 34298d0814145..2743ce9b82322 100644 --- a/third_party/rust/wgpu-hal/examples/raw-gles.rs +++ b/third_party/rust/wgpu-hal/examples/raw-gles.rs @@ -10,7 +10,7 @@ extern crate wgpu_hal as hal; -#[cfg(not(any(windows, target_arch = "wasm32", target_os = "ios")))] +#[cfg(not(any(target_arch = "wasm32", target_os = "ios")))] fn main() { use std::{ffi::CString, num::NonZeroU32}; @@ -255,7 +255,6 @@ fn main() { } #[cfg(any( - windows, all(target_arch = "wasm32", not(target_os = "emscripten")), target_os = "ios" ))] @@ -264,7 +263,6 @@ fn main() { } #[cfg(not(any( - windows, all(target_arch = "wasm32", not(target_os = "emscripten")), target_os = "ios" )))] diff --git a/third_party/rust/wgpu-hal/examples/ray-traced-triangle/main.rs b/third_party/rust/wgpu-hal/examples/ray-traced-triangle/main.rs index b1aceeb101d83..816827b5ae356 100644 --- a/third_party/rust/wgpu-hal/examples/ray-traced-triangle/main.rs +++ b/third_party/rust/wgpu-hal/examples/ray-traced-triangle/main.rs @@ -8,7 +8,9 @@ use raw_window_handle::{HasDisplayHandle, HasWindowHandle}; use glam::{Affine3A, Mat4, Vec3}; use 
std::{ borrow::{Borrow, Cow}, - iter, mem, ptr, + iter, + mem::size_of, + ptr, time::Instant, }; use winit::window::WindowButtons; @@ -304,7 +306,7 @@ impl Example { ty: wgt::BindingType::Buffer { ty: wgt::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: wgt::BufferSize::new(mem::size_of::() as _), + min_binding_size: wgt::BufferSize::new(size_of::() as _), }, count: None, }, @@ -516,7 +518,7 @@ impl Example { } }; - let uniforms_size = std::mem::size_of::(); + let uniforms_size = size_of::(); let uniform_buffer = unsafe { let uniform_buffer = device @@ -657,8 +659,7 @@ impl Example { ), ]; - let instances_buffer_size = - instances.len() * std::mem::size_of::(); + let instances_buffer_size = instances.len() * size_of::(); let instances_buffer = unsafe { let instances_buffer = device @@ -828,7 +829,7 @@ impl Example { }; let instances_buffer_size = - self.instances.len() * std::mem::size_of::(); + self.instances.len() * size_of::(); let tlas_flags = hal::AccelerationStructureBuildFlags::PREFER_FAST_TRACE | hal::AccelerationStructureBuildFlags::ALLOW_UPDATE; diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/conv.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/conv.rs index 09843f011d312..878dab39e93ef 100644 --- a/third_party/rust/wgpu-hal/src/auxil/dxgi/conv.rs +++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/conv.rs @@ -47,7 +47,7 @@ pub fn map_texture_format_failable( Tf::Rgb9e5Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP, Tf::Rgb10a2Uint => DXGI_FORMAT_R10G10B10A2_UINT, Tf::Rgb10a2Unorm => DXGI_FORMAT_R10G10B10A2_UNORM, - Tf::Rg11b10UFloat => DXGI_FORMAT_R11G11B10_FLOAT, + Tf::Rg11b10Ufloat => DXGI_FORMAT_R11G11B10_FLOAT, Tf::Rg32Uint => DXGI_FORMAT_R32G32_UINT, Tf::Rg32Sint => DXGI_FORMAT_R32G32_SINT, Tf::Rg32Float => DXGI_FORMAT_R32G32_FLOAT, diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs index 6c68ffeea6c65..bf806ab320f66 100644 --- 
a/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs +++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/factory.rs @@ -4,12 +4,7 @@ use windows::{core::Interface as _, Win32::Graphics::Dxgi}; use crate::dx12::DxgiLib; -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] -pub enum DxgiFactoryType { - Factory2, - Factory4, - Factory6, -} +// We can rely on the presence of DXGI 1.4 since D3D12 requires WDDM 2.0, Windows 10 (1507), and so does DXGI 1.4. fn should_keep_adapter(adapter: &Dxgi::IDXGIAdapter1) -> bool { let desc = unsafe { adapter.GetDesc1() }.unwrap(); @@ -52,75 +47,27 @@ fn should_keep_adapter(adapter: &Dxgi::IDXGIAdapter1) -> bool { } pub enum DxgiAdapter { - Adapter1(Dxgi::IDXGIAdapter1), - Adapter2(Dxgi::IDXGIAdapter2), + /// Provided by DXGI 1.4 Adapter3(Dxgi::IDXGIAdapter3), + /// Provided by DXGI 1.6 Adapter4(Dxgi::IDXGIAdapter4), } -impl windows::core::Param for &DxgiAdapter { - unsafe fn param(self) -> windows::core::ParamValue { - unsafe { self.deref().param() } - } -} - impl Deref for DxgiAdapter { - type Target = Dxgi::IDXGIAdapter; + type Target = Dxgi::IDXGIAdapter3; fn deref(&self) -> &Self::Target { match self { - DxgiAdapter::Adapter1(a) => a, - DxgiAdapter::Adapter2(a) => a, DxgiAdapter::Adapter3(a) => a, DxgiAdapter::Adapter4(a) => a, } } } -impl DxgiAdapter { - pub fn as_adapter2(&self) -> Option<&Dxgi::IDXGIAdapter2> { - match self { - Self::Adapter1(_) => None, - Self::Adapter2(f) => Some(f), - Self::Adapter3(f) => Some(f), - Self::Adapter4(f) => Some(f), - } - } - - pub fn unwrap_adapter2(&self) -> &Dxgi::IDXGIAdapter2 { - self.as_adapter2().unwrap() - } -} - pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { let mut adapters = Vec::with_capacity(8); for cur_index in 0.. 
{ - if let DxgiFactory::Factory6(ref factory6) = factory { - profiling::scope!("IDXGIFactory6::EnumAdapterByGpuPreference"); - // We're already at dxgi1.6, we can grab IDXGIAdapter4 directly - let adapter4: Dxgi::IDXGIAdapter4 = match unsafe { - factory6.EnumAdapterByGpuPreference( - cur_index, - Dxgi::DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE, - ) - } { - Ok(a) => a, - Err(e) if e.code() == Dxgi::DXGI_ERROR_NOT_FOUND => break, - Err(e) => { - log::error!("Failed enumerating adapters: {}", e); - break; - } - }; - - if !should_keep_adapter(&adapter4) { - continue; - } - - adapters.push(DxgiAdapter::Adapter4(adapter4)); - continue; - } - profiling::scope!("IDXGIFactory1::EnumAdapters1"); let adapter1: Dxgi::IDXGIAdapter1 = match unsafe { factory.EnumAdapters1(cur_index) } { Ok(a) => a, @@ -135,31 +82,12 @@ pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { continue; } - // Do the most aggressive casts first, skipping Adapter4 as we definitely don't have dxgi1_6. - - // Adapter1 -> Adapter3 - match adapter1.cast::() { - Ok(adapter3) => { - adapters.push(DxgiAdapter::Adapter3(adapter3)); - continue; - } - Err(err) => { - log::warn!("Failed casting Adapter1 to Adapter3: {}", err); - } - } - - // Adapter1 -> Adapter2 - match adapter1.cast::() { - Ok(adapter2) => { - adapters.push(DxgiAdapter::Adapter2(adapter2)); - continue; - } - Err(err) => { - log::warn!("Failed casting Adapter1 to Adapter2: {}", err); - } + if let Ok(adapter4) = adapter1.cast::() { + adapters.push(DxgiAdapter::Adapter4(adapter4)); + } else { + let adapter3 = adapter1.cast::().unwrap(); + adapters.push(DxgiAdapter::Adapter3(adapter3)); } - - adapters.push(DxgiAdapter::Adapter1(adapter1)); } adapters @@ -167,52 +95,37 @@ pub fn enumerate_adapters(factory: DxgiFactory) -> Vec { #[derive(Clone, Debug)] pub enum DxgiFactory { - Factory1(Dxgi::IDXGIFactory1), - Factory2(Dxgi::IDXGIFactory2), + /// Provided by DXGI 1.4 Factory4(Dxgi::IDXGIFactory4), + /// Provided by DXGI 1.5 + 
Factory5(Dxgi::IDXGIFactory5), + /// Provided by DXGI 1.6 Factory6(Dxgi::IDXGIFactory6), } impl Deref for DxgiFactory { - type Target = Dxgi::IDXGIFactory1; + type Target = Dxgi::IDXGIFactory4; fn deref(&self) -> &Self::Target { match self { - DxgiFactory::Factory1(f) => f, - DxgiFactory::Factory2(f) => f, DxgiFactory::Factory4(f) => f, + DxgiFactory::Factory5(f) => f, DxgiFactory::Factory6(f) => f, } } } impl DxgiFactory { - pub fn as_factory2(&self) -> Option<&Dxgi::IDXGIFactory2> { - match self { - Self::Factory1(_) => None, - Self::Factory2(f) => Some(f), - Self::Factory4(f) => Some(f), - Self::Factory6(f) => Some(f), - } - } - - pub fn unwrap_factory2(&self) -> &Dxgi::IDXGIFactory2 { - self.as_factory2().unwrap() - } - pub fn as_factory5(&self) -> Option<&Dxgi::IDXGIFactory5> { match self { - Self::Factory1(_) | Self::Factory2(_) | Self::Factory4(_) => None, + Self::Factory4(_) => None, + Self::Factory5(f) => Some(f), Self::Factory6(f) => Some(f), } } } -/// Tries to create a [`Dxgi::IDXGIFactory6`], then a [`Dxgi::IDXGIFactory4`], then a [`Dxgi::IDXGIFactory2`], then a [`Dxgi::IDXGIFactory1`], -/// returning the one that succeeds, or if the required_factory_type fails to be -/// created. pub fn create_factory( - required_factory_type: DxgiFactoryType, instance_flags: wgt::InstanceFlags, ) -> Result<(DxgiLib, DxgiFactory), crate::InstanceError> { let lib_dxgi = DxgiLib::new().map_err(|e| { @@ -225,111 +138,31 @@ pub fn create_factory( // The `DXGI_CREATE_FACTORY_DEBUG` flag is only allowed to be passed to // `CreateDXGIFactory2` if the debug interface is actually available. So // we check for whether it exists first. 
- match lib_dxgi.debug_interface1() { - Ok(pair) => match pair { - Ok(_debug_controller) => { - factory_flags |= Dxgi::DXGI_CREATE_FACTORY_DEBUG; - } - Err(err) => { - log::warn!("Unable to enable DXGI debug interface: {}", err); - } - }, - Err(err) => { - log::warn!("Debug interface function for DXGI not found: {:?}", err); - } + if lib_dxgi.debug_interface1().is_ok() { + factory_flags |= Dxgi::DXGI_CREATE_FACTORY_DEBUG; } // Intercept `OutputDebugString` calls super::exception::register_exception_handler(); } - // Try to create IDXGIFactory4 - let factory4 = match lib_dxgi.create_factory2(factory_flags) { - Ok(pair) => match pair { - Ok(factory) => Some(factory), - // We hard error here as we _should have_ been able to make a factory4 but couldn't. - Err(err) => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to create IDXGIFactory4: {err:?}" - ))); - } - }, - // If we require factory4, hard error. - Err(err) if required_factory_type == DxgiFactoryType::Factory4 => { + let factory4 = match lib_dxgi.create_factory4(factory_flags) { + Ok(factory) => factory, + Err(err) => { return Err(crate::InstanceError::with_source( - String::from("IDXGIFactory1 creation function not found"), + String::from("IDXGIFactory4 creation failed"), err, )); } - // If we don't print it to warn as all win7 will hit this case. - Err(err) => { - log::warn!("IDXGIFactory1 creation function not found: {err:?}"); - None - } }; - if let Some(factory4) = factory4 { - // Try to cast the IDXGIFactory4 into IDXGIFactory6 - let factory6 = factory4.cast::(); - match factory6 { - Ok(factory6) => { - return Ok((lib_dxgi, DxgiFactory::Factory6(factory6))); - } - // If we require factory6, hard error. 
- Err(err) if required_factory_type == DxgiFactoryType::Factory6 => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to cast IDXGIFactory4 to IDXGIFactory6: {err:?}" - ))); - } - // If we don't print it to warn. - Err(err) => { - log::warn!("Failed to cast IDXGIFactory4 to IDXGIFactory6: {:?}", err); - return Ok((lib_dxgi, DxgiFactory::Factory4(factory4))); - } - } + if let Ok(factory6) = factory4.cast::() { + return Ok((lib_dxgi, DxgiFactory::Factory6(factory6))); } - // Try to create IDXGIFactory1 - let factory1 = match lib_dxgi.create_factory1() { - Ok(pair) => match pair { - Ok(factory) => factory, - Err(err) => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to create IDXGIFactory1: {err:?}" - ))); - } - }, - // We always require at least factory1, so hard error - Err(err) => { - return Err(crate::InstanceError::with_source( - String::from("IDXGIFactory1 creation function not found"), - err, - )); - } - }; - - // Try to cast the IDXGIFactory1 into IDXGIFactory2 - let factory2 = factory1.cast::(); - match factory2 { - Ok(factory2) => { - return Ok((lib_dxgi, DxgiFactory::Factory2(factory2))); - } - // If we require factory2, hard error. - Err(err) if required_factory_type == DxgiFactoryType::Factory2 => { - // err is a Cow, not an Error implementor - return Err(crate::InstanceError::new(format!( - "failed to cast IDXGIFactory1 to IDXGIFactory2: {err:?}" - ))); - } - // If we don't print it to warn. - Err(err) => { - log::warn!("Failed to cast IDXGIFactory1 to IDXGIFactory2: {:?}", err); - } + if let Ok(factory5) = factory4.cast::() { + return Ok((lib_dxgi, DxgiFactory::Factory5(factory5))); } - // We tried to create 4 and 2, but only succeeded with 1. 
- Ok((lib_dxgi, DxgiFactory::Factory1(factory1))) + Ok((lib_dxgi, DxgiFactory::Factory4(factory4))) } diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs index 3bb88b5bf1a8d..61e3c6acf02a3 100644 --- a/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs +++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs @@ -1,56 +1,32 @@ -use std::borrow::Cow; - use windows::Win32::{Foundation, Graphics::Dxgi}; pub(crate) trait HResult { - fn into_result(self) -> Result>; fn into_device_result(self, description: &str) -> Result; } impl HResult for windows::core::Result { - fn into_result(self) -> Result> { - // TODO: use windows-rs built-in error formatting? - let description = match self { - Ok(t) => return Ok(t), - Err(e) if e.code() == Foundation::E_UNEXPECTED => "unexpected", - Err(e) if e.code() == Foundation::E_NOTIMPL => "not implemented", - Err(e) if e.code() == Foundation::E_OUTOFMEMORY => "out of memory", - Err(e) if e.code() == Foundation::E_INVALIDARG => "invalid argument", - Err(e) => return Err(Cow::Owned(format!("{e:?}"))), - }; - Err(Cow::Borrowed(description)) - } fn into_device_result(self, description: &str) -> Result { #![allow(unreachable_code)] - let err_code = if let Err(err) = &self { - Some(err.code()) - } else { - None - }; - self.into_result().map_err(|err| { + self.map_err(|err| { log::error!("{} failed: {}", description, err); - let Some(err_code) = err_code else { - unreachable!() - }; - - match err_code { + match err.code() { Foundation::E_OUTOFMEMORY => { #[cfg(feature = "oom_panic")] panic!("{description} failed: Out of memory"); - return crate::DeviceError::OutOfMemory; + crate::DeviceError::OutOfMemory } Dxgi::DXGI_ERROR_DEVICE_RESET | Dxgi::DXGI_ERROR_DEVICE_REMOVED => { #[cfg(feature = "device_lost_panic")] panic!("{description} failed: Device lost ({err})"); + crate::DeviceError::Lost } _ => { #[cfg(feature = "internal_error_panic")] panic!("{description} failed: {err}"); + 
crate::DeviceError::Unexpected } } - - crate::DeviceError::Lost }) } } diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/time.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/time.rs index 08bc3cee03110..1b312fd6518be 100644 --- a/third_party/rust/wgpu-hal/src/auxil/dxgi/time.rs +++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/time.rs @@ -62,7 +62,7 @@ impl PresentationTimer { let kernelbase = libloading::os::windows::Library::open_already_loaded("kernelbase.dll").unwrap(); // No concerns about lifetimes here as kernelbase is always there. - let ptr = unsafe { kernelbase.get(b"QueryInterruptTimePrecise").unwrap() }; + let ptr = unsafe { kernelbase.get(b"QueryInterruptTimePrecise\0").unwrap() }; Self::IPresentationManager { fnQueryInterruptTimePrecise: *ptr, } diff --git a/third_party/rust/wgpu-hal/src/dx12/adapter.rs b/third_party/rust/wgpu-hal/src/dx12/adapter.rs index 00930024ab3f3..45d69f558431b 100644 --- a/third_party/rust/wgpu-hal/src/dx12/adapter.rs +++ b/third_party/rust/wgpu-hal/src/dx12/adapter.rs @@ -1,4 +1,9 @@ -use std::{mem, ptr, sync::Arc, thread}; +use std::{ + mem::{size_of, size_of_val}, + ptr, + sync::Arc, + thread, +}; use parking_lot::Mutex; use windows::{ @@ -59,19 +64,9 @@ impl super::Adapter { // Create the device so that we can get the capabilities. let device = { profiling::scope!("ID3D12Device::create_device"); - match library.create_device(&adapter, Direct3D::D3D_FEATURE_LEVEL_11_0) { - Ok(pair) => match pair { - Ok(device) => device, - Err(err) => { - log::warn!("Device creation failed: {}", err); - return None; - } - }, - Err(err) => { - log::warn!("Device creation function is not found: {:?}", err); - return None; - } - } + library + .create_device(&adapter, Direct3D::D3D_FEATURE_LEVEL_11_0) + .ok()?? 
}; profiling::scope!("feature queries"); @@ -92,7 +87,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FEATURE_LEVELS, <*mut _>::cast(&mut device_levels), - mem::size_of_val(&device_levels) as u32, + size_of_val(&device_levels) as u32, ) } .unwrap(); @@ -100,7 +95,7 @@ impl super::Adapter { // We have found a possible adapter. // Acquire the device information. - let desc = unsafe { adapter.unwrap_adapter2().GetDesc2() }.unwrap(); + let desc = unsafe { adapter.GetDesc2() }.unwrap(); let device_name = auxil::dxgi::conv::map_adapter_name(desc.Description); @@ -110,7 +105,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_ARCHITECTURE, <*mut _>::cast(&mut features_architecture), - mem::size_of_val(&features_architecture) as u32, + size_of_val(&features_architecture) as u32, ) } .unwrap(); @@ -154,7 +149,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS, <*mut _>::cast(&mut options), - mem::size_of_val(&options) as u32, + size_of_val(&options) as u32, ) } .unwrap(); @@ -165,7 +160,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS2, <*mut _>::cast(&mut features2), - mem::size_of_val(&features2) as u32, + size_of_val(&features2) as u32, ) } .is_ok() @@ -178,7 +173,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS3, <*mut _>::cast(&mut features3), - mem::size_of_val(&features3) as u32, + size_of_val(&features3) as u32, ) } .is_ok() @@ -194,7 +189,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS7, <*mut _>::cast(&mut features7), - mem::size_of_val(&features7) as u32, + size_of_val(&features7) as u32, ) } .is_ok() @@ -224,7 +219,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_SHADER_MODEL, <*mut _>::cast(&mut sm), - mem::size_of_val(&sm) as u32, + size_of_val(&sm) as u32, ) } .is_ok() @@ -354,7 +349,7 
@@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, <*mut _>::cast(&mut bgra8unorm_info), - mem::size_of_val(&bgra8unorm_info) as u32, + size_of_val(&bgra8unorm_info) as u32, ) }; hr.is_ok() @@ -372,7 +367,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS1, <*mut _>::cast(&mut features1), - mem::size_of_val(&features1) as u32, + size_of_val(&features1) as u32, ) }; @@ -396,7 +391,7 @@ impl super::Adapter { device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_D3D12_OPTIONS9, <*mut _>::cast(&mut features9), - mem::size_of_val(&features9) as u32, + size_of_val(&features9) as u32, ) } .is_ok() @@ -524,6 +519,9 @@ impl super::Adapter { Direct3D12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT as u64, ) .unwrap(), + // Direct3D correctly bounds-checks all array accesses: + // https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#18.6.8.2%20Device%20Memory%20Reads + uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, downlevel, }, @@ -604,7 +602,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, <*mut _>::cast(&mut data), - mem::size_of_val(&data) as u32, + size_of_val(&data) as u32, ) } .unwrap(); @@ -622,7 +620,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_FORMAT_SUPPORT, ptr::addr_of_mut!(data_srv_uav).cast(), - mem::size_of::() as u32, + size_of::() as u32, ) } .unwrap(); @@ -718,7 +716,7 @@ impl crate::Adapter for super::Adapter { self.device.CheckFeatureSupport( Direct3D12::D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS, <*mut _>::cast(&mut ms_levels), - mem::size_of_val(&ms_levels) as u32, + size_of_val(&ms_levels) as u32, ) } .is_ok() diff --git a/third_party/rust/wgpu-hal/src/dx12/command.rs b/third_party/rust/wgpu-hal/src/dx12/command.rs index 5f32480fdbc49..00f56cdb5f034 100644 --- 
a/third_party/rust/wgpu-hal/src/dx12/command.rs +++ b/third_party/rust/wgpu-hal/src/dx12/command.rs @@ -53,7 +53,6 @@ impl crate::BufferTextureCopy { impl super::Temp { fn prepare_marker(&mut self, marker: &str) -> (&[u16], u32) { - // TODO: Store in HSTRING self.marker.clear(); self.marker.extend(marker.encode_utf16()); self.marker.push(0); @@ -153,7 +152,7 @@ impl super::CommandEncoder { self.update_root_elements(); } - //Note: we have to call this lazily before draw calls. Otherwise, D3D complains + // Note: we have to call this lazily before draw calls. Otherwise, D3D complains // about the root parameters being incompatible with root signature. fn update_root_elements(&mut self) { use super::{BufferViewKind as Bvk, PassKind as Pk}; @@ -265,7 +264,8 @@ impl crate::CommandEncoder for super::CommandEncoder { unsafe fn begin_encoding(&mut self, label: crate::Label) -> Result<(), crate::DeviceError> { let list = loop { if let Some(list) = self.free_lists.pop() { - let reset_result = unsafe { list.Reset(&self.allocator, None) }.into_result(); + // TODO: Is an error expected here and should we print it? + let reset_result = unsafe { list.Reset(&self.allocator, None) }; if reset_result.is_ok() { break Some(list); } @@ -314,7 +314,9 @@ impl crate::CommandEncoder for super::CommandEncoder { for cmd_buf in command_buffers { self.free_lists.push(cmd_buf.raw); } - let _todo_handle_error = unsafe { self.allocator.Reset() }; + if let Err(e) = unsafe { self.allocator.Reset() } { + log::error!("ID3D12CommandAllocator::Reset() failed with {e}"); + } } unsafe fn transition_buffers<'a, T>(&mut self, barriers: T) @@ -724,8 +726,7 @@ impl crate::CommandEncoder for super::CommandEncoder { cat.clear_value.b as f32, cat.clear_value.a as f32, ]; - // TODO: Empty slice vs None? 
- unsafe { list.ClearRenderTargetView(*rtv, &value, Some(&[])) }; + unsafe { list.ClearRenderTargetView(*rtv, &value, None) }; } if let Some(ref target) = cat.resolve_target { self.pass.resolves.push(super::PassResolve { @@ -754,12 +755,23 @@ impl crate::CommandEncoder for super::CommandEncoder { if let Some(ds_view) = ds_view { if flags != Direct3D12::D3D12_CLEAR_FLAGS::default() { unsafe { - list.ClearDepthStencilView( + // list.ClearDepthStencilView( + // ds_view, + // flags, + // ds.clear_value.0, + // ds.clear_value.1 as u8, + // None, + // ) + // TODO: Replace with the above in the next breaking windows-rs release, + // when https://github.com/microsoft/win32metadata/pull/1971 is in. + (windows_core::Interface::vtable(list).ClearDepthStencilView)( + windows_core::Interface::as_raw(list), ds_view, flags, ds.clear_value.0, ds.clear_value.1 as u8, - &[], + 0, + std::ptr::null(), ) } } @@ -796,7 +808,7 @@ impl crate::CommandEncoder for super::CommandEncoder { Type: Direct3D12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: Direct3D12::D3D12_RESOURCE_BARRIER_FLAG_NONE, Anonymous: Direct3D12::D3D12_RESOURCE_BARRIER_0 { - //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. + // Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResove`. Transition: mem::ManuallyDrop::new( Direct3D12::D3D12_RESOURCE_TRANSITION_BARRIER { @@ -813,7 +825,7 @@ impl crate::CommandEncoder for super::CommandEncoder { Type: Direct3D12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION, Flags: Direct3D12::D3D12_RESOURCE_BARRIER_FLAG_NONE, Anonymous: Direct3D12::D3D12_RESOURCE_BARRIER_0 { - //Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. + // Note: this assumes `D3D12_RESOURCE_STATE_RENDER_TARGET`. // If it's not the case, we can include the `TextureUses` in `PassResolve`. 
Transition: mem::ManuallyDrop::new( Direct3D12::D3D12_RESOURCE_TRANSITION_BARRIER { diff --git a/third_party/rust/wgpu-hal/src/dx12/descriptor.rs b/third_party/rust/wgpu-hal/src/dx12/descriptor.rs index ebb42ddcd139d..f3b7f26f25450 100644 --- a/third_party/rust/wgpu-hal/src/dx12/descriptor.rs +++ b/third_party/rust/wgpu-hal/src/dx12/descriptor.rs @@ -56,16 +56,18 @@ impl GeneralHeap { .into_device_result("Descriptor heap creation")? }; + let start = DualHandle { + cpu: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, + gpu: unsafe { raw.GetGPUDescriptorHandleForHeapStart() }, + count: 0, + }; + Ok(Self { - raw: raw.clone(), + raw, ty, handle_size: unsafe { device.GetDescriptorHandleIncrementSize(ty) } as u64, total_handles, - start: DualHandle { - cpu: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, - gpu: unsafe { raw.GetGPUDescriptorHandleForHeapStart() }, - count: 0, - }, + start, ranges: Mutex::new(RangeAllocator::new(0..total_handles)), }) } @@ -268,12 +270,14 @@ impl CpuHeap { let raw = unsafe { device.CreateDescriptorHeap::(&desc) } .into_device_result("CPU descriptor heap creation")?; + let start = unsafe { raw.GetCPUDescriptorHandleForHeapStart() }; + Ok(Self { inner: Mutex::new(CpuHeapInner { - _raw: raw.clone(), + _raw: raw, stage: Vec::new(), }), - start: unsafe { raw.GetCPUDescriptorHandleForHeapStart() }, + start, handle_size, total, }) @@ -297,7 +301,7 @@ impl fmt::Debug for CpuHeap { } pub(super) unsafe fn upload( - device: Direct3D12::ID3D12Device, + device: &Direct3D12::ID3D12Device, src: &CpuHeapInner, dst: &GeneralHeap, dummy_copy_counts: &[u32], diff --git a/third_party/rust/wgpu-hal/src/dx12/device.rs b/third_party/rust/wgpu-hal/src/dx12/device.rs index dd68160315727..43425bb5f14b3 100644 --- a/third_party/rust/wgpu-hal/src/dx12/device.rs +++ b/third_party/rust/wgpu-hal/src/dx12/device.rs @@ -1,5 +1,6 @@ use std::{ - ffi, mem, + ffi, + mem::{self, size_of}, num::NonZeroU32, ptr, sync::Arc, @@ -84,7 +85,7 @@ impl super::Device { } 
.into_device_result("Zero buffer creation")?; - let zero_buffer = zero_buffer.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let zero_buffer = zero_buffer.ok_or(crate::DeviceError::Unexpected)?; // Note: without `D3D12_HEAP_FLAG_CREATE_NOT_ZEROED` // this resource is zeroed by default. @@ -113,7 +114,7 @@ impl super::Device { ) } .into_device_result("Command signature creation")?; - signature.ok_or(crate::DeviceError::ResourceCreationFailed) + signature.ok_or(crate::DeviceError::Unexpected) } let shared = super::DeviceShared { @@ -121,7 +122,7 @@ impl super::Device { cmd_signatures: super::CommandSignatures { draw: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW, ..Default::default() @@ -130,7 +131,7 @@ impl super::Device { )?, draw_indexed: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED, ..Default::default() @@ -139,7 +140,7 @@ impl super::Device { )?, dispatch: create_command_signature( &raw, - mem::size_of::(), + size_of::(), &[Direct3D12::D3D12_INDIRECT_ARGUMENT_DESC { Type: Direct3D12::D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH, ..Default::default() @@ -287,7 +288,7 @@ impl super::Device { }; let full_stage = format!( - "{}_{}\0", + "{}_{}", naga_stage.to_hlsl_str(), naga_options.shader_model.to_str() ); @@ -305,28 +306,33 @@ impl super::Device { let source_name = stage.module.raw_name.as_deref(); // Compile with DXC if available, otherwise fall back to FXC - let (result, log_level) = if let Some(ref dxc_container) = self.dxc_container { + let result = if let Some(ref dxc_container) = self.dxc_container { shader_compilation::compile_dxc( self, &source, source_name, raw_ep, stage_bit, - full_stage, + &full_stage, dxc_container, ) } else { - let full_stage = ffi::CStr::from_bytes_with_nul(full_stage.as_bytes()).unwrap(); 
shader_compilation::compile_fxc( self, &source, source_name, - &ffi::CString::new(raw_ep.as_str()).unwrap(), + raw_ep, stage_bit, - full_stage, + &full_stage, ) }; + let log_level = if result.is_ok() { + log::Level::Info + } else { + log::Level::Error + }; + log::log!( log_level, "Naga generated shader for {:?} at {:?}:\n{}", @@ -387,7 +393,6 @@ impl crate::Device for super::Device { &self, desc: &crate::BufferDescriptor, ) -> Result { - let mut resource = None; let mut size = desc.size; if desc.usage.contains(crate::BufferUses::UNIFORM) { let align_mask = Direct3D12::D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT as u64 - 1; @@ -410,10 +415,8 @@ impl crate::Device for super::Device { Flags: conv::map_buffer_usage_to_resource_flags(desc.usage), }; - let allocation = - super::suballocation::create_buffer_resource(self, desc, raw_desc, &mut resource)?; - - let resource = resource.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let (resource, allocation) = + super::suballocation::create_buffer_resource(self, desc, raw_desc)?; if let Some(label) = desc.label { unsafe { resource.SetName(&windows::core::HSTRING::from(label)) } @@ -441,6 +444,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -470,10 +477,6 @@ impl crate::Device for super::Device { &self, desc: &crate::TextureDescriptor, ) -> Result { - use super::suballocation::create_texture_resource; - - let mut resource = None; - let raw_desc = Direct3D12::D3D12_RESOURCE_DESC { Dimension: conv::map_texture_dimension(desc.dimension), Alignment: 0, @@ -495,9 +498,9 @@ impl crate::Device for super::Device { Flags: conv::map_texture_usage_to_resource_flags(desc.usage), }; - let allocation = create_texture_resource(self, desc, raw_desc, &mut resource)?; + let (resource, allocation) = + super::suballocation::create_texture_resource(self, 
desc, raw_desc)?; - let resource = resource.ok_or(crate::DeviceError::ResourceCreationFailed)?; if let Some(label) = desc.label { unsafe { resource.SetName(&windows::core::HSTRING::from(label)) } .into_device_result("SetName")?; @@ -532,6 +535,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, @@ -1295,7 +1302,7 @@ impl crate::Device for super::Device { Some(inner) => { let dual = unsafe { descriptor::upload( - self.raw.clone(), + &self.raw, &inner, &self.shared.heap_views, &desc.layout.copy_counts, @@ -1309,7 +1316,7 @@ impl crate::Device for super::Device { Some(inner) => { let dual = unsafe { descriptor::upload( - self.raw.clone(), + &self.raw, &inner, &self.shared.heap_samplers, &desc.layout.copy_counts, @@ -1643,7 +1650,7 @@ impl crate::Device for super::Device { } .into_device_result("Query heap creation")?; - let raw = raw.ok_or(crate::DeviceError::ResourceCreationFailed)?; + let raw = raw.ok_or(crate::DeviceError::Unexpected)?; if let Some(label) = desc.label { unsafe { raw.SetName(&windows::core::HSTRING::from(label)) } diff --git a/third_party/rust/wgpu-hal/src/dx12/instance.rs b/third_party/rust/wgpu-hal/src/dx12/instance.rs index 03656161951ba..7b6acc623abaa 100644 --- a/third_party/rust/wgpu-hal/src/dx12/instance.rs +++ b/third_party/rust/wgpu-hal/src/dx12/instance.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{mem::size_of_val, sync::Arc}; use parking_lot::RwLock; use windows::{ @@ -10,10 +10,7 @@ use windows::{ }; use super::SurfaceTarget; -use crate::{ - auxil::{self, dxgi::result::HResult as _}, - dx12::D3D12Lib, -}; +use crate::{auxil, dx12::D3D12Lib}; impl Drop for super::Instance { fn drop(&mut self) { @@ -37,55 +34,28 @@ impl crate::Instance for super::Instance { .intersects(wgt::InstanceFlags::VALIDATION | wgt::InstanceFlags::GPU_BASED_VALIDATION) { 
// Enable debug layer - match lib_main.debug_interface() { - Ok(pair) => match pair { - Ok(debug_controller) => { - if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) { - unsafe { debug_controller.EnableDebugLayer() } - } - if desc - .flags - .intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION) - { - #[allow(clippy::collapsible_if)] - if let Ok(debug1) = debug_controller.cast::() - { - unsafe { debug1.SetEnableGPUBasedValidation(true) } - } else { - log::warn!("Failed to enable GPU-based validation"); - } - } - } - Err(err) => { - log::warn!("Unable to enable D3D12 debug interface: {}", err); + if let Ok(debug_controller) = lib_main.debug_interface() { + if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) { + unsafe { debug_controller.EnableDebugLayer() } + } + if desc + .flags + .intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION) + { + #[allow(clippy::collapsible_if)] + if let Ok(debug1) = debug_controller.cast::() { + unsafe { debug1.SetEnableGPUBasedValidation(true) } + } else { + log::warn!("Failed to enable GPU-based validation"); } - }, - Err(err) => { - log::warn!("Debug interface function for D3D12 not found: {:?}", err); } } } - // Create DXGIFactory4 - let (lib_dxgi, factory) = auxil::dxgi::factory::create_factory( - auxil::dxgi::factory::DxgiFactoryType::Factory4, - desc.flags, - )?; + let (lib_dxgi, factory) = auxil::dxgi::factory::create_factory(desc.flags)?; // Create IDXGIFactoryMedia - let factory_media = match lib_dxgi.create_factory_media() { - Ok(pair) => match pair { - Ok(factory_media) => Some(factory_media), - Err(err) => { - log::error!("Failed to create IDXGIFactoryMedia: {}", err); - None - } - }, - Err(err) => { - log::warn!("IDXGIFactory1 creation function not found: {:?}", err); - None - } - }; + let factory_media = lib_dxgi.create_factory_media().ok(); let mut supports_allow_tearing = false; if let Some(factory5) = factory.as_factory5() { @@ -94,12 +64,12 @@ impl crate::Instance for super::Instance { 
factory5.CheckFeatureSupport( Dxgi::DXGI_FEATURE_PRESENT_ALLOW_TEARING, <*mut _>::cast(&mut allow_tearing), - std::mem::size_of_val(&allow_tearing) as u32, + size_of_val(&allow_tearing) as u32, ) }; - match hr.into_result() { - Err(err) => log::warn!("Unable to check for tearing support: {}", err), + match hr { + Err(err) => log::warn!("Unable to check for tearing support: {err}"), Ok(()) => supports_allow_tearing = true, } } diff --git a/third_party/rust/wgpu-hal/src/dx12/mod.rs b/third_party/rust/wgpu-hal/src/dx12/mod.rs index e4b9e7463783d..defeab1ada41f 100644 --- a/third_party/rust/wgpu-hal/src/dx12/mod.rs +++ b/third_party/rust/wgpu-hal/src/dx12/mod.rs @@ -49,14 +49,13 @@ use std::{ffi, fmt, mem, num::NonZeroU32, ops::Deref, sync::Arc}; use arrayvec::ArrayVec; use parking_lot::{Mutex, RwLock}; use windows::{ - core::{Interface, Param as _}, + core::{Free, Interface}, Win32::{ Foundation, Graphics::{Direct3D, Direct3D12, DirectComposition, Dxgi}, System::Threading, }, }; -use windows_core::Free; use crate::auxil::{ self, @@ -66,21 +65,50 @@ use crate::auxil::{ }, }; +#[derive(Debug)] +struct DynLib { + inner: libloading::Library, +} + +impl DynLib { + unsafe fn new

(filename: P) -> Result + where + P: AsRef, + { + unsafe { libloading::Library::new(filename) }.map(|inner| Self { inner }) + } + + unsafe fn get( + &self, + symbol: &[u8], + ) -> Result, crate::DeviceError> { + unsafe { self.inner.get(symbol) }.map_err(|e| match e { + libloading::Error::GetProcAddress { .. } | libloading::Error::GetProcAddressUnknown => { + crate::DeviceError::Unexpected + } + libloading::Error::IncompatibleSize + | libloading::Error::CreateCString { .. } + | libloading::Error::CreateCStringWithTrailing { .. } => crate::hal_internal_error(e), + _ => crate::DeviceError::Unexpected, // could be unreachable!() but we prefer to be more robust + }) + } +} + #[derive(Debug)] struct D3D12Lib { - lib: libloading::Library, + lib: DynLib, } impl D3D12Lib { fn new() -> Result { - unsafe { libloading::Library::new("d3d12.dll").map(|lib| D3D12Lib { lib }) } + unsafe { DynLib::new("d3d12.dll").map(|lib| Self { lib }) } } fn create_device( &self, adapter: &DxgiAdapter, feature_level: Direct3D::D3D_FEATURE_LEVEL, - ) -> Result, libloading::Error> { + ) -> Result, crate::DeviceError> { // Calls windows::Win32::Graphics::Direct3D12::D3D12CreateDevice on d3d12.dll type Fun = extern "system" fn( padapter: *mut core::ffi::c_void, @@ -88,17 +116,30 @@ impl D3D12Lib { riid: *const windows_core::GUID, ppdevice: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12CreateDevice") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12CreateDevice\0") }?; - let mut result__ = None; - Ok((func)( - unsafe { adapter.param().abi() }, + let mut result__: Option = None; + + let res = (func)( + adapter.as_raw(), feature_level, // TODO: Generic? 
&Direct3D12::ID3D12Device::IID, <*mut _>::cast(&mut result__), ) - .map(|| result__.expect("D3D12CreateDevice succeeded but result is NULL?"))) + .ok(); + + if let Err(ref err) = res { + match err.code() { + Dxgi::DXGI_ERROR_UNSUPPORTED => return Ok(None), + Dxgi::DXGI_ERROR_DRIVER_INTERNAL_ERROR => return Err(crate::DeviceError::Lost), + _ => {} + } + } + + res.into_device_result("Device creation")?; + + result__.ok_or(crate::DeviceError::Unexpected).map(Some) } fn serialize_root_signature( @@ -115,11 +156,8 @@ impl D3D12Lib { ppblob: *mut *mut core::ffi::c_void, pperrorblob: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12SerializeRootSignature") } - .map_err(|e| { - log::error!("Unable to find serialization function: {:?}", e); - crate::DeviceError::Lost - })?; + let func: libloading::Symbol = + unsafe { self.lib.get(b"D3D12SerializeRootSignature\0") }?; let desc = Direct3D12::D3D12_ROOT_SIGNATURE_DESC { NumParameters: parameters.len() as _, @@ -138,8 +176,6 @@ impl D3D12Lib { <*mut _>::cast(&mut error), ) .ok() - // TODO: If there's a HRESULT, error may still be non-null and - // contain info. 
.into_device_result("Root signature serialization")?; if let Some(error) = error { @@ -148,104 +184,102 @@ impl D3D12Lib { "Root signature serialization error: {:?}", unsafe { error.as_c_str() }.unwrap().to_str().unwrap() ); - return Err(crate::DeviceError::Lost); + return Err(crate::DeviceError::Unexpected); // could be hal_usage_error or hal_internal_error } - Ok(D3DBlob(blob.expect( - "D3D12SerializeRootSignature succeeded but result is NULL?", - ))) + blob.ok_or(crate::DeviceError::Unexpected) } - fn debug_interface( - &self, - ) -> Result, libloading::Error> { + fn debug_interface(&self) -> Result { // Calls windows::Win32::Graphics::Direct3D12::D3D12GetDebugInterface on d3d12.dll type Fun = extern "system" fn( riid: *const windows_core::GUID, ppvdebug: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12GetDebugInterface") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"D3D12GetDebugInterface\0") }?; + + let mut result__ = None; - let mut result__ = core::ptr::null_mut(); - Ok((func)(&Direct3D12::ID3D12Debug::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + (func)(&Direct3D12::ID3D12Debug::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("GetDebugInterface")?; + + result__.ok_or(crate::DeviceError::Unexpected) } } #[derive(Debug)] pub(super) struct DxgiLib { - lib: libloading::Library, + lib: DynLib, } impl DxgiLib { pub fn new() -> Result { - unsafe { libloading::Library::new("dxgi.dll").map(|lib| DxgiLib { lib }) } + unsafe { DynLib::new("dxgi.dll").map(|lib| Self { lib }) } } - pub fn debug_interface1( - &self, - ) -> Result, libloading::Error> { + /// Will error with crate::DeviceError::Unexpected if DXGI 1.3 is not available. 
+ pub fn debug_interface1(&self) -> Result { // Calls windows::Win32::Graphics::Dxgi::DXGIGetDebugInterface1 on dxgi.dll type Fun = extern "system" fn( flags: u32, riid: *const windows_core::GUID, pdebug: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"DXGIGetDebugInterface1") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"DXGIGetDebugInterface1\0") }?; - let mut result__ = core::ptr::null_mut(); - Ok((func)(0, &Dxgi::IDXGIInfoQueue::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) - } + let mut result__ = None; - pub fn create_factory1( - &self, - ) -> Result, libloading::Error> { - // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory1 on dxgi.dll - type Fun = extern "system" fn( - riid: *const windows_core::GUID, - ppfactory: *mut *mut core::ffi::c_void, - ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1") }?; + (func)(0, &Dxgi::IDXGIInfoQueue::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("debug_interface1")?; - let mut result__ = core::ptr::null_mut(); - Ok((func)(&Dxgi::IDXGIFactory1::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + result__.ok_or(crate::DeviceError::Unexpected) } - pub fn create_factory2( + /// Will error with crate::DeviceError::Unexpected if DXGI 1.4 is not available. 
+ pub fn create_factory4( &self, factory_flags: Dxgi::DXGI_CREATE_FACTORY_FLAGS, - ) -> Result, libloading::Error> { + ) -> Result { // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory2 on dxgi.dll type Fun = extern "system" fn( flags: Dxgi::DXGI_CREATE_FACTORY_FLAGS, riid: *const windows_core::GUID, ppfactory: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory2") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory2\0") }?; - let mut result__ = core::ptr::null_mut(); - Ok( - (func)(factory_flags, &Dxgi::IDXGIFactory4::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) }), + let mut result__ = None; + + (func)( + factory_flags, + &Dxgi::IDXGIFactory4::IID, + <*mut _>::cast(&mut result__), ) + .ok() + .into_device_result("create_factory4")?; + + result__.ok_or(crate::DeviceError::Unexpected) } - pub fn create_factory_media( - &self, - ) -> Result, libloading::Error> { + /// Will error with crate::DeviceError::Unexpected if DXGI 1.3 is not available. 
+ pub fn create_factory_media(&self) -> Result { // Calls windows::Win32::Graphics::Dxgi::CreateDXGIFactory1 on dxgi.dll type Fun = extern "system" fn( riid: *const windows_core::GUID, ppfactory: *mut *mut core::ffi::c_void, ) -> windows_core::HRESULT; - let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1") }?; + let func: libloading::Symbol = unsafe { self.lib.get(b"CreateDXGIFactory1\0") }?; + + let mut result__ = None; - let mut result__ = core::ptr::null_mut(); // https://learn.microsoft.com/en-us/windows/win32/api/dxgi1_3/nn-dxgi1_3-idxgifactorymedia - Ok((func)(&Dxgi::IDXGIFactoryMedia::IID, &mut result__) - .and_then(|| unsafe { windows_core::Type::from_abi(result__) })) + (func)(&Dxgi::IDXGIFactoryMedia::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("create_factory_media")?; + + result__.ok_or(crate::DeviceError::Unexpected) } } @@ -368,7 +402,7 @@ pub struct Instance { } impl Instance { - pub unsafe fn create_surface_from_visual(&self, visual: *mut std::ffi::c_void) -> Surface { + pub unsafe fn create_surface_from_visual(&self, visual: *mut ffi::c_void) -> Surface { let visual = unsafe { DirectComposition::IDCompositionVisual::from_raw_borrowed(&visual) } .expect("COM pointer should not be NULL"); Surface { @@ -382,7 +416,7 @@ impl Instance { pub unsafe fn create_surface_from_surface_handle( &self, - surface_handle: *mut std::ffi::c_void, + surface_handle: *mut ffi::c_void, ) -> Surface { // TODO: We're not given ownership, so we shouldn't call HANDLE::free(). This puts an extra burden on the caller to keep it alive. // https://learn.microsoft.com/en-us/windows/win32/api/handleapi/nf-handleapi-duplicatehandle could help us, even though DirectComposition is not in the list? 
@@ -399,7 +433,7 @@ impl Instance { pub unsafe fn create_surface_from_swap_chain_panel( &self, - swap_chain_panel: *mut std::ffi::c_void, + swap_chain_panel: *mut ffi::c_void, ) -> Surface { let swap_chain_panel = unsafe { types::ISwapChainPanelNative::from_raw_borrowed(&swap_chain_panel) } @@ -622,7 +656,7 @@ struct PassState { #[test] fn test_dirty_mask() { - assert_eq!(MAX_ROOT_ELEMENTS, mem::size_of::() * 8); + assert_eq!(MAX_ROOT_ELEMENTS, u64::BITS as usize); } impl PassState { @@ -897,8 +931,7 @@ pub struct ShaderModule { impl crate::DynShaderModule for ShaderModule {} pub(super) enum CompiledShader { - #[allow(unused)] - Dxc(Vec), + Dxc(Direct3D::Dxc::IDxcBlob), Fxc(Direct3D::ID3DBlob), } @@ -906,8 +939,8 @@ impl CompiledShader { fn create_native_shader(&self) -> Direct3D12::D3D12_SHADER_BYTECODE { match self { CompiledShader::Dxc(shader) => Direct3D12::D3D12_SHADER_BYTECODE { - pShaderBytecode: shader.as_ptr().cast(), - BytecodeLength: shader.len(), + pShaderBytecode: unsafe { shader.GetBufferPointer() }, + BytecodeLength: unsafe { shader.GetBufferSize() }, }, CompiledShader::Fxc(shader) => Direct3D12::D3D12_SHADER_BYTECODE { pShaderBytecode: unsafe { shader.GetBufferPointer() }, @@ -1026,8 +1059,8 @@ impl crate::Surface for Surface { flags, ) }; - if let Err(err) = result.into_result() { - log::error!("ResizeBuffers failed: {}", err); + if let Err(err) = result { + log::error!("ResizeBuffers failed: {err}"); return Err(crate::SurfaceError::Other("window is in use")); } raw @@ -1053,11 +1086,13 @@ impl crate::Surface for Surface { }; let swap_chain1 = match self.target { SurfaceTarget::Visual(_) | SurfaceTarget::SwapChainPanel(_) => { - profiling::scope!("IDXGIFactory4::CreateSwapChainForComposition"); + profiling::scope!("IDXGIFactory2::CreateSwapChainForComposition"); unsafe { - self.factory - .unwrap_factory2() - .CreateSwapChainForComposition(&device.present_queue, &desc, None) + self.factory.CreateSwapChainForComposition( + &device.present_queue, + 
&desc, + None, + ) } } SurfaceTarget::SurfaceHandle(handle) => { @@ -1077,9 +1112,9 @@ impl crate::Surface for Surface { } } SurfaceTarget::WndHandle(hwnd) => { - profiling::scope!("IDXGIFactory4::CreateSwapChainForHwnd"); + profiling::scope!("IDXGIFactory2::CreateSwapChainForHwnd"); unsafe { - self.factory.unwrap_factory2().CreateSwapChainForHwnd( + self.factory.CreateSwapChainForHwnd( &device.present_queue, hwnd, &desc, @@ -1092,24 +1127,22 @@ impl crate::Surface for Surface { let swap_chain1 = swap_chain1.map_err(|err| { log::error!("SwapChain creation error: {}", err); - crate::SurfaceError::Other("swap chain creation") + crate::SurfaceError::Other("swapchain creation") })?; match &self.target { SurfaceTarget::WndHandle(_) | SurfaceTarget::SurfaceHandle(_) => {} SurfaceTarget::Visual(visual) => { - if let Err(err) = unsafe { visual.SetContent(&swap_chain1) }.into_result() { - log::error!("Unable to SetContent: {}", err); + if let Err(err) = unsafe { visual.SetContent(&swap_chain1) } { + log::error!("Unable to SetContent: {err}"); return Err(crate::SurfaceError::Other( "IDCompositionVisual::SetContent", )); } } SurfaceTarget::SwapChainPanel(swap_chain_panel) => { - if let Err(err) = - unsafe { swap_chain_panel.SetSwapChain(&swap_chain1) }.into_result() - { - log::error!("Unable to SetSwapChain: {}", err); + if let Err(err) = unsafe { swap_chain_panel.SetSwapChain(&swap_chain1) } { + log::error!("Unable to SetSwapChain: {err}"); return Err(crate::SurfaceError::Other( "ISwapChainPanelNative::SetSwapChain", )); @@ -1117,13 +1150,10 @@ impl crate::Surface for Surface { } } - match swap_chain1.cast::() { - Ok(swap_chain3) => swap_chain3, - Err(err) => { - log::error!("Unable to cast swap chain: {}", err); - return Err(crate::SurfaceError::Other("swap chain cast to 3")); - } - } + swap_chain1.cast::().map_err(|err| { + log::error!("Unable to cast swapchain: {err}"); + crate::SurfaceError::Other("swapchain cast to version 3") + })? 
} }; diff --git a/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs b/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs index 8385082e35c40..e3d4d03c6c4ba 100644 --- a/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs +++ b/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs @@ -1,13 +1,11 @@ -use std::ffi::CStr; -use std::ptr; - -pub(super) use dxc::{compile_dxc, get_dxc_container, DxcContainer}; -use windows::Win32::Graphics::Direct3D; - use crate::auxil::dxgi::result::HResult; +use std::ffi::CStr; +use std::path::PathBuf; +use windows::{ + core::{Interface, PCSTR, PCWSTR}, + Win32::Graphics::Direct3D::{Dxc, Fxc}, +}; -// This exists so that users who don't want to use dxc can disable the dxc_shader_compiler feature -// and not have to compile hassle_rs. // Currently this will use Dxc if it is chosen as the dx12 compiler at `Instance` creation time, and will // fallback to FXC if the Dxc libraries (dxil.dll and dxcompiler.dll) are not found, or if Fxc is chosen at' // `Instance` creation time. @@ -16,40 +14,41 @@ pub(super) fn compile_fxc( device: &super::Device, source: &str, source_name: Option<&CStr>, - raw_ep: &CStr, + raw_ep: &str, stage_bit: wgt::ShaderStages, - full_stage: &CStr, -) -> ( - Result, - log::Level, -) { + full_stage: &str, +) -> Result { profiling::scope!("compile_fxc"); let mut shader_data = None; - let mut compile_flags = Direct3D::Fxc::D3DCOMPILE_ENABLE_STRICTNESS; + let mut compile_flags = Fxc::D3DCOMPILE_ENABLE_STRICTNESS; if device .private_caps .instance_flags .contains(wgt::InstanceFlags::DEBUG) { - compile_flags |= - Direct3D::Fxc::D3DCOMPILE_DEBUG | Direct3D::Fxc::D3DCOMPILE_SKIP_OPTIMIZATION; + compile_flags |= Fxc::D3DCOMPILE_DEBUG | Fxc::D3DCOMPILE_SKIP_OPTIMIZATION; } + let raw_ep = std::ffi::CString::new(raw_ep).unwrap(); + let full_stage = std::ffi::CString::new(full_stage).unwrap(); + // If no name has been set, D3DCompile wants the null pointer. 
- let source_name = source_name.map(|cstr| cstr.as_ptr()).unwrap_or(ptr::null()); + let source_name = source_name + .map(|cstr| cstr.as_ptr().cast()) + .unwrap_or(core::ptr::null()); let mut error = None; let hr = unsafe { - profiling::scope!("Direct3D::Fxc::D3DCompile"); - Direct3D::Fxc::D3DCompile( + profiling::scope!("Fxc::D3DCompile"); + Fxc::D3DCompile( // TODO: Update low-level bindings to accept a slice here source.as_ptr().cast(), source.len(), - windows::core::PCSTR(source_name.cast()), + PCSTR(source_name), None, None, - windows::core::PCSTR(raw_ep.as_ptr().cast()), - windows::core::PCSTR(full_stage.as_ptr().cast()), + PCSTR(raw_ep.as_ptr().cast()), + PCSTR(full_stage.as_ptr().cast()), compile_flags, 0, &mut shader_data, @@ -57,13 +56,10 @@ pub(super) fn compile_fxc( ) }; - match hr.into_result() { + match hr { Ok(()) => { let shader_data = shader_data.unwrap(); - ( - Ok(super::CompiledShader::Fxc(shader_data)), - log::Level::Info, - ) + Ok(super::CompiledShader::Fxc(shader_data)) } Err(e) => { let mut full_msg = format!("FXC D3DCompile error ({e})"); @@ -77,234 +73,237 @@ pub(super) fn compile_fxc( }; let _ = write!(full_msg, ": {}", String::from_utf8_lossy(message)); } - ( - Err(crate::PipelineError::Linkage(stage_bit, full_msg)), - log::Level::Warn, - ) + Err(crate::PipelineError::Linkage(stage_bit, full_msg)) } } } -// The Dxc implementation is behind a feature flag so that users who don't want to use dxc can disable the feature. -#[cfg(feature = "dxc_shader_compiler")] -mod dxc { - use std::ffi::CStr; - use std::path::PathBuf; - - // Destructor order should be fine since _dxil and _dxc don't rely on each other. - pub(crate) struct DxcContainer { - compiler: hassle_rs::DxcCompiler, - library: hassle_rs::DxcLibrary, - validator: hassle_rs::DxcValidator, - // Has to be held onto for the lifetime of the device otherwise shaders will fail to compile. 
- _dxc: hassle_rs::Dxc, - // Also Has to be held onto for the lifetime of the device otherwise shaders will fail to validate. - _dxil: hassle_rs::Dxil, - } +trait DxcObj: Interface { + const CLSID: windows::core::GUID; +} +impl DxcObj for Dxc::IDxcCompiler3 { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcCompiler; +} +impl DxcObj for Dxc::IDxcUtils { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcUtils; +} +impl DxcObj for Dxc::IDxcValidator { + const CLSID: windows::core::GUID = Dxc::CLSID_DxcValidator; +} - pub(crate) fn get_dxc_container( - dxc_path: Option, - dxil_path: Option, - ) -> Result, crate::DeviceError> { - // Make sure that dxil.dll exists. - let dxil = match hassle_rs::Dxil::new(dxil_path) { - Ok(dxil) => dxil, - Err(e) => { - log::warn!("Failed to load dxil.dll. Defaulting to FXC instead: {}", e); - return Ok(None); - } - }; +#[derive(Debug)] +struct DxcLib { + lib: crate::dx12::DynLib, +} - // Needed for explicit validation. - let validator = dxil.create_validator()?; - - let dxc = match hassle_rs::Dxc::new(dxc_path) { - Ok(dxc) => dxc, - Err(e) => { - log::warn!( - "Failed to load dxcompiler.dll. 
Defaulting to FXC instead: {}", - e - ); - return Ok(None); +impl DxcLib { + fn new(lib_path: Option, lib_name: &'static str) -> Result { + let lib_path = if let Some(lib_path) = lib_path { + if lib_path.is_file() { + lib_path + } else { + lib_path.join(lib_name) } + } else { + PathBuf::from(lib_name) }; - let compiler = dxc.create_compiler()?; - let library = dxc.create_library()?; - - Ok(Some(DxcContainer { - _dxc: dxc, - compiler, - library, - _dxil: dxil, - validator, - })) + unsafe { crate::dx12::DynLib::new(lib_path).map(|lib| Self { lib }) } } - pub(crate) fn compile_dxc( - device: &crate::dx12::Device, - source: &str, - source_name: Option<&CStr>, - raw_ep: &str, - stage_bit: wgt::ShaderStages, - full_stage: String, - dxc_container: &DxcContainer, - ) -> ( - Result, - log::Level, - ) { - profiling::scope!("compile_dxc"); - let mut compile_flags = arrayvec::ArrayVec::<&str, 6>::new_const(); - compile_flags.push("-Ges"); // Direct3D::Fxc::D3DCOMPILE_ENABLE_STRICTNESS - compile_flags.push("-Vd"); // Disable implicit validation to work around bugs when dxil.dll isn't in the local directory. - compile_flags.push("-HV"); // Use HLSL 2018, Naga doesn't supported 2021 yet. 
- compile_flags.push("2018"); - - if device - .private_caps - .instance_flags - .contains(wgt::InstanceFlags::DEBUG) - { - compile_flags.push("-Zi"); // Direct3D::Fxc::D3DCOMPILE_SKIP_OPTIMIZATION - compile_flags.push("-Od"); // Direct3D::Fxc::D3DCOMPILE_DEBUG + pub fn create_instance(&self) -> Result { + type Fun = extern "system" fn( + rclsid: *const windows_core::GUID, + riid: *const windows_core::GUID, + ppv: *mut *mut core::ffi::c_void, + ) -> windows_core::HRESULT; + let func: libloading::Symbol = unsafe { self.lib.get(b"DxcCreateInstance\0") }?; + + let mut result__ = None; + (func)(&T::CLSID, &T::IID, <*mut _>::cast(&mut result__)) + .ok() + .into_device_result("DxcCreateInstance")?; + result__.ok_or(crate::DeviceError::Unexpected) + } +} + +// Destructor order should be fine since _dxil and _dxc don't rely on each other. +pub(super) struct DxcContainer { + compiler: Dxc::IDxcCompiler3, + utils: Dxc::IDxcUtils, + validator: Dxc::IDxcValidator, + // Has to be held onto for the lifetime of the device otherwise shaders will fail to compile. + _dxc: DxcLib, + // Also Has to be held onto for the lifetime of the device otherwise shaders will fail to validate. + _dxil: DxcLib, +} + +pub(super) fn get_dxc_container( + dxc_path: Option, + dxil_path: Option, +) -> Result, crate::DeviceError> { + let dxc = match DxcLib::new(dxc_path, "dxcompiler.dll") { + Ok(dxc) => dxc, + Err(e) => { + log::warn!( + "Failed to load dxcompiler.dll. Defaulting to FXC instead: {}", + e + ); + return Ok(None); } + }; - let blob = match dxc_container - .library - .create_blob_with_encoding_from_str(source) - .map_err(|e| crate::PipelineError::Linkage(stage_bit, format!("DXC blob error: {e}"))) - { - Ok(blob) => blob, - Err(e) => return (Err(e), log::Level::Error), - }; + let dxil = match DxcLib::new(dxil_path, "dxil.dll") { + Ok(dxil) => dxil, + Err(e) => { + log::warn!("Failed to load dxil.dll. 
Defaulting to FXC instead: {}", e); + return Ok(None); + } + }; - let source_name = source_name - .and_then(|cstr| cstr.to_str().ok()) - .unwrap_or(""); + let compiler = dxc.create_instance::()?; + let utils = dxc.create_instance::()?; + let validator = dxil.create_instance::()?; - let compiled = dxc_container.compiler.compile( - &blob, - source_name, - raw_ep, - &full_stage, - &compile_flags, - None, - &[], - ); - - let (result, log_level) = match compiled { - Ok(dxc_result) => match dxc_result.get_result() { - Ok(dxc_blob) => { - // Validate the shader. - match dxc_container.validator.validate(dxc_blob) { - Ok(validated_blob) => ( - Ok(crate::dx12::CompiledShader::Dxc(validated_blob.to_vec())), - log::Level::Info, - ), - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!( - "DXC validation error: {:?}\n{:?}", - get_error_string_from_dxc_result(&dxc_container.library, &e.0) - .unwrap_or_default(), - e.1 - ), - )), - log::Level::Error, - ), - } - } - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!("DXC compile error: {e}"), - )), - log::Level::Error, - ), - }, - Err(e) => ( - Err(crate::PipelineError::Linkage( - stage_bit, - format!( - "DXC compile error: {}", - get_error_string_from_dxc_result(&dxc_container.library, &e.0) - .unwrap_or_default() - ), - )), - log::Level::Error, - ), - }; + Ok(Some(DxcContainer { + compiler, + utils, + validator, + _dxc: dxc, + _dxil: dxil, + })) +} - (result, log_level) - } +/// Owned PCWSTR +#[allow(clippy::upper_case_acronyms)] +struct OPCWSTR { + inner: Vec, +} - impl From for crate::DeviceError { - fn from(value: hassle_rs::HassleError) -> Self { - match value { - hassle_rs::HassleError::Win32Error(e) => { - // TODO: This returns an HRESULT, should we try and use the associated Windows error message? 
- log::error!("Win32 error: {e:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::LoadLibraryError { filename, inner } => { - log::error!("Failed to load dxc library {filename:?}. Inner error: {inner:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::LibLoadingError(e) => { - log::error!("Failed to load dxc library. {e:?}"); - crate::DeviceError::Lost - } - hassle_rs::HassleError::WindowsOnly(e) => { - log::error!("Signing with dxil.dll is only supported on Windows. {e:?}"); - crate::DeviceError::Lost - } - // `ValidationError` and `CompileError` should never happen in a context involving `DeviceError` - hassle_rs::HassleError::ValidationError(_e) => unimplemented!(), - hassle_rs::HassleError::CompileError(_e) => unimplemented!(), - } - } +impl OPCWSTR { + fn new(s: &str) -> Self { + let mut inner: Vec<_> = s.encode_utf16().collect(); + inner.push(0); + Self { inner } } - fn get_error_string_from_dxc_result( - library: &hassle_rs::DxcLibrary, - error: &hassle_rs::DxcOperationResult, - ) -> Result { - error - .get_error_buffer() - .and_then(|error| library.get_blob_as_string(&hassle_rs::DxcBlob::from(error))) + fn ptr(&self) -> PCWSTR { + PCWSTR(self.inner.as_ptr()) } } -// These are stubs for when the `dxc_shader_compiler` feature is disabled. -#[cfg(not(feature = "dxc_shader_compiler"))] -mod dxc { - use std::ffi::CStr; - use std::path::PathBuf; - - pub(crate) struct DxcContainer {} - - pub(crate) fn get_dxc_container( - _dxc_path: Option, - _dxil_path: Option, - ) -> Result, crate::DeviceError> { - // Falls back to Fxc and logs an error. - log::error!("DXC shader compiler was requested on Instance creation, but the DXC feature is disabled. 
Enable the `dxc_shader_compiler` feature on wgpu_hal to use DXC."); - Ok(None) +fn get_output( + res: &Dxc::IDxcResult, + kind: Dxc::DXC_OUT_KIND, +) -> Result { + let mut result__: Option = None; + unsafe { res.GetOutput::(kind, &mut None, <*mut _>::cast(&mut result__)) } + .into_device_result("GetOutput")?; + result__.ok_or(crate::DeviceError::Unexpected) +} + +fn as_err_str(blob: &Dxc::IDxcBlobUtf8) -> Result<&str, crate::DeviceError> { + let ptr = unsafe { blob.GetStringPointer() }; + let len = unsafe { blob.GetStringLength() }; + core::str::from_utf8(unsafe { core::slice::from_raw_parts(ptr.0, len) }) + .map_err(|_| crate::DeviceError::Unexpected) +} + +pub(super) fn compile_dxc( + device: &crate::dx12::Device, + source: &str, + source_name: Option<&CStr>, + raw_ep: &str, + stage_bit: wgt::ShaderStages, + full_stage: &str, + dxc_container: &DxcContainer, +) -> Result { + profiling::scope!("compile_dxc"); + + let source_name = source_name.and_then(|cstr| cstr.to_str().ok()); + + let source_name = source_name.map(OPCWSTR::new); + let raw_ep = OPCWSTR::new(raw_ep); + let full_stage = OPCWSTR::new(full_stage); + + let mut compile_args = arrayvec::ArrayVec::::new_const(); + + if let Some(source_name) = source_name.as_ref() { + compile_args.push(source_name.ptr()) + } + + compile_args.extend([ + windows::core::w!("-E"), + raw_ep.ptr(), + windows::core::w!("-T"), + full_stage.ptr(), + windows::core::w!("-HV"), + windows::core::w!("2018"), // Use HLSL 2018, Naga doesn't supported 2021 yet. + windows::core::w!("-no-warnings"), + Dxc::DXC_ARG_ENABLE_STRICTNESS, + Dxc::DXC_ARG_SKIP_VALIDATION, // Disable implicit validation to work around bugs when dxil.dll isn't in the local directory. 
+ ]); + + if device + .private_caps + .instance_flags + .contains(wgt::InstanceFlags::DEBUG) + { + compile_args.push(Dxc::DXC_ARG_DEBUG); + compile_args.push(Dxc::DXC_ARG_SKIP_OPTIMIZATIONS); + } + + let buffer = Dxc::DxcBuffer { + Ptr: source.as_ptr().cast(), + Size: source.len(), + Encoding: Dxc::DXC_CP_UTF8.0, + }; + + let compile_res: Dxc::IDxcResult = unsafe { + dxc_container + .compiler + .Compile(&buffer, Some(&compile_args), None) + } + .into_device_result("Compile")?; + + drop(compile_args); + drop(source_name); + drop(raw_ep); + drop(full_stage); + + let err_blob = get_output::(&compile_res, Dxc::DXC_OUT_ERRORS)?; + + let len = unsafe { err_blob.GetStringLength() }; + if len != 0 { + let err = as_err_str(&err_blob)?; + return Err(crate::PipelineError::Linkage( + stage_bit, + format!("DXC compile error: {err}"), + )); } - // It shouldn't be possible that this gets called with the `dxc_shader_compiler` feature disabled. - pub(crate) fn compile_dxc( - _device: &crate::dx12::Device, - _source: &str, - _source_name: Option<&CStr>, - _raw_ep: &str, - _stage_bit: wgt::ShaderStages, - _full_stage: String, - _dxc_container: &DxcContainer, - ) -> ( - Result, - log::Level, - ) { - unimplemented!("Something went really wrong, please report this. Attempted to compile shader with DXC, but the DXC feature is disabled. Enable the `dxc_shader_compiler` feature on wgpu_hal to use DXC."); + let blob = get_output::(&compile_res, Dxc::DXC_OUT_OBJECT)?; + + let err_blob = { + let res = unsafe { + dxc_container + .validator + .Validate(&blob, Dxc::DxcValidatorFlags_InPlaceEdit) + } + .into_device_result("Validate")?; + + unsafe { res.GetErrorBuffer() }.into_device_result("GetErrorBuffer")? 
+ }; + + let size = unsafe { err_blob.GetBufferSize() }; + if size != 0 { + let err_blob = unsafe { dxc_container.utils.GetBlobAsUtf8(&err_blob) } + .into_device_result("GetBlobAsUtf8")?; + let err = as_err_str(&err_blob)?; + return Err(crate::PipelineError::Linkage( + stage_bit, + format!("DXC validation error: {err}"), + )); } + + Ok(crate::dx12::CompiledShader::Dxc(blob)) } diff --git a/third_party/rust/wgpu-hal/src/dx12/suballocation.rs b/third_party/rust/wgpu-hal/src/dx12/suballocation.rs index d840e118f1a75..bdb3e85129dcd 100644 --- a/third_party/rust/wgpu-hal/src/dx12/suballocation.rs +++ b/third_party/rust/wgpu-hal/src/dx12/suballocation.rs @@ -52,14 +52,14 @@ pub(crate) fn create_buffer_resource( device: &crate::dx12::Device, desc: &crate::BufferDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result, crate::DeviceError> { +) -> Result<(Direct3D12::ID3D12Resource, Option), crate::DeviceError> { let is_cpu_read = desc.usage.contains(crate::BufferUses::MAP_READ); let is_cpu_write = desc.usage.contains(crate::BufferUses::MAP_WRITE); // Workaround for Intel Xe drivers if !device.private_caps.suballocation_supported { - return create_committed_buffer_resource(device, desc, raw_desc, resource).map(|()| None); + return create_committed_buffer_resource(device, desc, raw_desc) + .map(|resource| (resource, None)); } let location = match (is_cpu_read, is_cpu_write) { @@ -80,6 +80,7 @@ pub(crate) fn create_buffer_resource( location, ); let allocation = allocator.allocator.allocate(&allocation_desc)?; + let mut resource = None; unsafe { device.raw.CreatePlacedResource( @@ -88,32 +89,30 @@ pub(crate) fn create_buffer_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, - resource, + &mut resource, ) } .into_device_result("Placed buffer creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } + let resource = resource.ok_or(crate::DeviceError::Unexpected)?; device .counters 
.buffer_memory .add(allocation.size() as isize); - Ok(Some(AllocationWrapper { allocation })) + Ok((resource, Some(AllocationWrapper { allocation }))) } pub(crate) fn create_texture_resource( device: &crate::dx12::Device, desc: &crate::TextureDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result, crate::DeviceError> { +) -> Result<(Direct3D12::ID3D12Resource, Option), crate::DeviceError> { // Workaround for Intel Xe drivers if !device.private_caps.suballocation_supported { - return create_committed_texture_resource(device, desc, raw_desc, resource).map(|()| None); + return create_committed_texture_resource(device, desc, raw_desc) + .map(|resource| (resource, None)); } let location = MemoryLocation::GpuOnly; @@ -128,6 +127,7 @@ pub(crate) fn create_texture_resource( location, ); let allocation = allocator.allocator.allocate(&allocation_desc)?; + let mut resource = None; unsafe { device.raw.CreatePlacedResource( @@ -136,21 +136,19 @@ pub(crate) fn create_texture_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, // clear value - resource, + &mut resource, ) } .into_device_result("Placed texture creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } + let resource = resource.ok_or(crate::DeviceError::Unexpected)?; device .counters .texture_memory .add(allocation.size() as isize); - Ok(Some(AllocationWrapper { allocation })) + Ok((resource, Some(AllocationWrapper { allocation }))) } pub(crate) fn free_buffer_allocation( @@ -226,8 +224,7 @@ pub(crate) fn create_committed_buffer_resource( device: &crate::dx12::Device, desc: &crate::BufferDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result<(), crate::DeviceError> { +) -> Result { let is_cpu_read = desc.usage.contains(crate::BufferUses::MAP_READ); let is_cpu_write = desc.usage.contains(crate::BufferUses::MAP_WRITE); @@ -250,6 +247,8 @@ pub(crate) fn create_committed_buffer_resource( 
VisibleNodeMask: 0, }; + let mut resource = None; + unsafe { device.raw.CreateCommittedResource( &heap_properties, @@ -261,24 +260,19 @@ pub(crate) fn create_committed_buffer_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, - resource, + &mut resource, ) } .into_device_result("Committed buffer creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } - - Ok(()) + resource.ok_or(crate::DeviceError::Unexpected) } pub(crate) fn create_committed_texture_resource( device: &crate::dx12::Device, _desc: &crate::TextureDescriptor, raw_desc: Direct3D12::D3D12_RESOURCE_DESC, - resource: &mut Option, -) -> Result<(), crate::DeviceError> { +) -> Result { let heap_properties = Direct3D12::D3D12_HEAP_PROPERTIES { Type: Direct3D12::D3D12_HEAP_TYPE_CUSTOM, CPUPageProperty: Direct3D12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE, @@ -290,6 +284,8 @@ pub(crate) fn create_committed_texture_resource( VisibleNodeMask: 0, }; + let mut resource = None; + unsafe { device.raw.CreateCommittedResource( &heap_properties, @@ -301,14 +297,10 @@ pub(crate) fn create_committed_texture_resource( &raw_desc, Direct3D12::D3D12_RESOURCE_STATE_COMMON, None, // clear value - resource, + &mut resource, ) } .into_device_result("Committed texture creation")?; - if resource.is_none() { - return Err(crate::DeviceError::ResourceCreationFailed); - } - - Ok(()) + resource.ok_or(crate::DeviceError::Unexpected) } diff --git a/third_party/rust/wgpu-hal/src/dynamic/command.rs b/third_party/rust/wgpu-hal/src/dynamic/command.rs index 6c0f1cb02d2e6..4ecdf74723d9a 100644 --- a/third_party/rust/wgpu-hal/src/dynamic/command.rs +++ b/third_party/rust/wgpu-hal/src/dynamic/command.rs @@ -61,7 +61,7 @@ pub trait DynCommandEncoder: DynResource + std::fmt::Debug { &mut self, layout: &dyn DynPipelineLayout, index: u32, - group: &dyn DynBindGroup, + group: Option<&dyn DynBindGroup>, dynamic_offsets: &[wgt::DynamicOffset], ); @@ -282,9 +282,15 @@ impl DynCommandEncoder for C 
{ &mut self, layout: &dyn DynPipelineLayout, index: u32, - group: &dyn DynBindGroup, + group: Option<&dyn DynBindGroup>, dynamic_offsets: &[wgt::DynamicOffset], ) { + if group.is_none() { + // TODO: Handle group None correctly. + return; + } + let group = group.unwrap(); + let layout = layout.expect_downcast_ref(); let group = group.expect_downcast_ref(); unsafe { C::set_bind_group(self, layout, index, group, dynamic_offsets) }; diff --git a/third_party/rust/wgpu-hal/src/dynamic/device.rs b/third_party/rust/wgpu-hal/src/dynamic/device.rs index 1386196d60a32..f044a001de751 100644 --- a/third_party/rust/wgpu-hal/src/dynamic/device.rs +++ b/third_party/rust/wgpu-hal/src/dynamic/device.rs @@ -24,6 +24,7 @@ pub trait DynDevice: DynResource { ) -> Result, DeviceError>; unsafe fn destroy_buffer(&self, buffer: Box); + unsafe fn add_raw_buffer(&self, buffer: &dyn DynBuffer); unsafe fn map_buffer( &self, @@ -41,6 +42,8 @@ pub trait DynDevice: DynResource { desc: &TextureDescriptor, ) -> Result, DeviceError>; unsafe fn destroy_texture(&self, texture: Box); + unsafe fn add_raw_texture(&self, texture: &dyn DynTexture); + unsafe fn create_texture_view( &self, texture: &dyn DynTexture, @@ -177,6 +180,10 @@ impl DynDevice for D { unsafe fn destroy_buffer(&self, buffer: Box) { unsafe { D::destroy_buffer(self, buffer.unbox()) }; } + unsafe fn add_raw_buffer(&self, buffer: &dyn DynBuffer) { + let buffer = buffer.expect_downcast_ref(); + unsafe { D::add_raw_buffer(self, buffer) }; + } unsafe fn map_buffer( &self, @@ -217,6 +224,11 @@ impl DynDevice for D { unsafe { D::destroy_texture(self, texture.unbox()) }; } + unsafe fn add_raw_texture(&self, texture: &dyn DynTexture) { + let texture = texture.expect_downcast_ref(); + unsafe { D::add_raw_texture(self, texture) }; + } + unsafe fn create_texture_view( &self, texture: &dyn DynTexture, diff --git a/third_party/rust/wgpu-hal/src/empty.rs b/third_party/rust/wgpu-hal/src/empty.rs index 4d8868c360e0a..72d9784d655b5 100644 --- 
a/third_party/rust/wgpu-hal/src/empty.rs +++ b/third_party/rust/wgpu-hal/src/empty.rs @@ -168,6 +168,8 @@ impl crate::Device for Context { Ok(Resource) } unsafe fn destroy_buffer(&self, buffer: Resource) {} + unsafe fn add_raw_buffer(&self, _buffer: &Resource) {} + unsafe fn map_buffer( &self, buffer: &Resource, @@ -183,6 +185,8 @@ impl crate::Device for Context { Ok(Resource) } unsafe fn destroy_texture(&self, texture: Resource) {} + unsafe fn add_raw_texture(&self, _texture: &Resource) {} + unsafe fn create_texture_view( &self, texture: &Resource, diff --git a/third_party/rust/wgpu-hal/src/gles/adapter.rs b/third_party/rust/wgpu-hal/src/gles/adapter.rs index e7ecacebe0969..a654215d21cb4 100644 --- a/third_party/rust/wgpu-hal/src/gles/adapter.rs +++ b/third_party/rust/wgpu-hal/src/gles/adapter.rs @@ -841,6 +841,16 @@ impl super::Adapter { alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(4).unwrap(), buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(), + // #6151: `wgpu_hal::gles` doesn't ask Naga to inject bounds + // checks in GLSL, and it doesn't request extensions like + // `KHR_robust_buffer_access_behavior` that would provide + // them, so we can't really implement the checks promised by + // [`crate::BufferBinding`]. + // + // Since this is a pre-existing condition, for the time + // being, provide 1 as the value here, to cause as little + // trouble as possible. 
+ uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, }, }) @@ -1097,7 +1107,7 @@ impl crate::Adapter for super::Adapter { Tf::Rgba8Sint => renderable | storage, Tf::Rgb10a2Uint => renderable, Tf::Rgb10a2Unorm => filterable_renderable, - Tf::Rg11b10UFloat => filterable | float_renderable, + Tf::Rg11b10Ufloat => filterable | float_renderable, Tf::Rg32Uint => renderable, Tf::Rg32Sint => renderable, Tf::Rg32Float => unfilterable | float_renderable | texture_float_linear, diff --git a/third_party/rust/wgpu-hal/src/gles/conv.rs b/third_party/rust/wgpu-hal/src/gles/conv.rs index 8733d54957b65..3a6d5ebb2e457 100644 --- a/third_party/rust/wgpu-hal/src/gles/conv.rs +++ b/third_party/rust/wgpu-hal/src/gles/conv.rs @@ -45,7 +45,7 @@ impl super::AdapterShared { glow::RGBA, glow::UNSIGNED_INT_2_10_10_10_REV, ), - Tf::Rg11b10UFloat => ( + Tf::Rg11b10Ufloat => ( glow::R11F_G11F_B10F, glow::RGB, glow::UNSIGNED_INT_10F_11F_11F_REV, diff --git a/third_party/rust/wgpu-hal/src/gles/device.rs b/third_party/rust/wgpu-hal/src/gles/device.rs index 490f73259b2eb..15292d95c5a2c 100644 --- a/third_party/rust/wgpu-hal/src/gles/device.rs +++ b/third_party/rust/wgpu-hal/src/gles/device.rs @@ -647,6 +647,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -982,6 +986,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, diff --git a/third_party/rust/wgpu-hal/src/gles/egl.rs b/third_party/rust/wgpu-hal/src/gles/egl.rs index 86f104ba999f7..42aec2b253df3 100644 --- a/third_party/rust/wgpu-hal/src/gles/egl.rs +++ b/third_party/rust/wgpu-hal/src/gles/egl.rs @@ -143,7 +143,7 @@ impl Drop for DisplayOwner { match 
self.display { DisplayRef::X11(ptr) => unsafe { let func: libloading::Symbol = - self.library.get(b"XCloseDisplay").unwrap(); + self.library.get(b"XCloseDisplay\0").unwrap(); func(ptr.as_ptr()); }, DisplayRef::Wayland => {} @@ -155,7 +155,7 @@ fn open_x_display() -> Option { log::debug!("Loading X11 library to get the current display"); unsafe { let library = find_library(&["libX11.so.6", "libX11.so"])?; - let func: libloading::Symbol = library.get(b"XOpenDisplay").unwrap(); + let func: libloading::Symbol = library.get(b"XOpenDisplay\0").unwrap(); let result = func(ptr::null()); ptr::NonNull::new(result).map(|ptr| DisplayOwner { display: DisplayRef::X11(ptr), @@ -182,9 +182,9 @@ fn test_wayland_display() -> Option { let library = unsafe { let client_library = find_library(&["libwayland-client.so.0", "libwayland-client.so"])?; let wl_display_connect: libloading::Symbol = - client_library.get(b"wl_display_connect").unwrap(); + client_library.get(b"wl_display_connect\0").unwrap(); let wl_display_disconnect: libloading::Symbol = - client_library.get(b"wl_display_disconnect").unwrap(); + client_library.get(b"wl_display_disconnect\0").unwrap(); let display = ptr::NonNull::new(wl_display_connect(ptr::null()))?; wl_display_disconnect(display.as_ptr()); find_library(&["libwayland-egl.so.1", "libwayland-egl.so"])? @@ -379,7 +379,7 @@ struct EglContextLock<'a> { display: khronos_egl::Display, } -/// A guard containing a lock to an [`AdapterContext`] +/// A guard containing a lock to an [`AdapterContext`], while the GL context is kept current. pub struct AdapterContextLock<'a> { glow: MutexGuard<'a, ManuallyDrop>, egl: Option>, @@ -1082,7 +1082,9 @@ impl crate::Instance for Instance { unsafe { gl.debug_message_callback(super::gl_debug_message_callback) }; } - // Avoid accidental drop when the context is not current. 
+ // Wrap in ManuallyDrop to make it easier to "current" the GL context before dropping this + // GLOW context, which could also happen if a panic occurs after we uncurrent the context + // below but before AdapterContext is constructed. let gl = ManuallyDrop::new(gl); inner.egl.unmake_current(); @@ -1294,7 +1296,7 @@ impl crate::Surface for Surface { (WindowKind::Wayland, Rwh::Wayland(handle)) => { let library = &self.wsi.display_owner.as_ref().unwrap().library; let wl_egl_window_create: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_create") }.unwrap(); + unsafe { library.get(b"wl_egl_window_create\0") }.unwrap(); let window = unsafe { wl_egl_window_create(handle.surface.as_ptr(), 640, 480) } .cast(); @@ -1403,7 +1405,7 @@ impl crate::Surface for Surface { if let Some(window) = wl_window { let library = &self.wsi.display_owner.as_ref().unwrap().library; let wl_egl_window_resize: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_resize") }.unwrap(); + unsafe { library.get(b"wl_egl_window_resize\0") }.unwrap(); unsafe { wl_egl_window_resize( window, @@ -1475,7 +1477,7 @@ impl crate::Surface for Surface { .expect("unsupported window") .library; let wl_egl_window_destroy: libloading::Symbol = - unsafe { library.get(b"wl_egl_window_destroy") }.unwrap(); + unsafe { library.get(b"wl_egl_window_destroy\0") }.unwrap(); unsafe { wl_egl_window_destroy(window) }; } } diff --git a/third_party/rust/wgpu-hal/src/gles/wgl.rs b/third_party/rust/wgpu-hal/src/gles/wgl.rs index ea466f877c479..2d6c91aee0814 100644 --- a/third_party/rust/wgpu-hal/src/gles/wgl.rs +++ b/third_party/rust/wgpu-hal/src/gles/wgl.rs @@ -1,15 +1,7 @@ -use glow::HasContext; -use glutin_wgl_sys::wgl_extra::{ - Wgl, CONTEXT_CORE_PROFILE_BIT_ARB, CONTEXT_DEBUG_BIT_ARB, CONTEXT_FLAGS_ARB, - CONTEXT_PROFILE_MASK_ARB, -}; -use once_cell::sync::Lazy; -use parking_lot::{Mutex, MutexGuard, RwLock}; -use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; use std::{ collections::HashSet, 
ffi::{c_void, CStr, CString}, - mem::{self, ManuallyDrop}, + mem::{self, size_of, size_of_val, ManuallyDrop}, os::raw::c_int, ptr, sync::{ @@ -19,6 +11,15 @@ use std::{ thread, time::Duration, }; + +use glow::HasContext; +use glutin_wgl_sys::wgl_extra::{ + Wgl, CONTEXT_CORE_PROFILE_BIT_ARB, CONTEXT_DEBUG_BIT_ARB, CONTEXT_FLAGS_ARB, + CONTEXT_PROFILE_MASK_ARB, +}; +use once_cell::sync::Lazy; +use parking_lot::{Mutex, MutexGuard, RwLock}; +use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; use wgt::InstanceFlags; use windows::{ core::{Error, PCSTR}, @@ -48,7 +49,10 @@ impl AdapterContext { } pub fn raw_context(&self) -> *mut c_void { - self.inner.lock().context.context.0 + match self.inner.lock().context { + Some(ref wgl) => wgl.context.0, + None => ptr::null_mut(), + } } /// Obtain a lock to the WGL context and get handle to the [`glow::Context`] that can be used to @@ -62,7 +66,9 @@ impl AdapterContext { .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS)) .expect("Could not lock adapter context. This is most-likely a deadlock."); - inner.context.make_current(inner.device.dc).unwrap(); + if let Some(wgl) = &inner.context { + wgl.make_current(inner.device.dc).unwrap() + }; AdapterContextLock { inner } } @@ -79,14 +85,15 @@ impl AdapterContext { .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS)) .expect("Could not lock adapter context. This is most-likely a deadlock."); - inner - .context - .make_current(device) - .map(|()| AdapterContextLock { inner }) + if let Some(wgl) = &inner.context { + wgl.make_current(device)?; + } + + Ok(AdapterContextLock { inner }) } } -/// A guard containing a lock to an [`AdapterContext`] +/// A guard containing a lock to an [`AdapterContext`], while the GL context is kept current. 
pub struct AdapterContextLock<'a> { inner: MutexGuard<'a, Inner>, } @@ -101,7 +108,9 @@ impl<'a> std::ops::Deref for AdapterContextLock<'a> { impl<'a> Drop for AdapterContextLock<'a> { fn drop(&mut self) { - self.inner.context.unmake_current().unwrap(); + if let Some(wgl) = &self.inner.context { + wgl.unmake_current().unwrap() + } } } @@ -136,7 +145,7 @@ unsafe impl Sync for WglContext {} struct Inner { gl: ManuallyDrop, device: InstanceDevice, - context: WglContext, + context: Option, } impl Drop for Inner { @@ -150,8 +159,14 @@ impl Drop for Inner { // Context must be current when dropped. See safety docs on // `glow::HasContext`. - self.context.make_current(self.device.dc).unwrap(); - let _guard = CurrentGuard(&self.context); + // + // NOTE: This is only set to `None` by `Adapter::new_external` which + // requires the context to be current when anything that may be holding + // the `Arc` is dropped. + let _guard = self.context.as_ref().map(|wgl| { + wgl.make_current(self.device.dc).unwrap(); + CurrentGuard(wgl) + }); // SAFETY: Field not used after this. 
unsafe { ManuallyDrop::drop(&mut self.gl) }; } @@ -196,7 +211,7 @@ unsafe fn setup_pixel_format(dc: Gdi::HDC) -> Result<(), crate::InstanceError> { { let format = OpenGL::PIXELFORMATDESCRIPTOR { nVersion: 1, - nSize: mem::size_of::() as u16, + nSize: size_of::() as u16, dwFlags: OpenGL::PFD_DRAW_TO_WINDOW | OpenGL::PFD_SUPPORT_OPENGL | OpenGL::PFD_DOUBLEBUFFER, @@ -232,12 +247,7 @@ unsafe fn setup_pixel_format(dc: Gdi::HDC) -> Result<(), crate::InstanceError> { } let mut format = Default::default(); if unsafe { - OpenGL::DescribePixelFormat( - dc, - index, - mem::size_of_val(&format) as u32, - Some(&mut format), - ) + OpenGL::DescribePixelFormat(dc, index, size_of_val(&format) as u32, Some(&mut format)) } == 0 { return Err(crate::InstanceError::with_source( @@ -280,7 +290,7 @@ fn create_global_window_class() -> Result { } let window_class = WindowsAndMessaging::WNDCLASSEXA { - cbSize: mem::size_of::() as u32, + cbSize: size_of::() as u32, style: WindowsAndMessaging::CS_OWNDC, lpfnWndProc: Some(wnd_proc), cbClsExtra: 0, @@ -515,7 +525,9 @@ impl crate::Instance for Instance { unsafe { gl.debug_message_callback(super::gl_debug_message_callback) }; } - // Avoid accidental drop when the context is not current. + // Wrap in ManuallyDrop to make it easier to "current" the GL context before dropping this + // GLOW context, which could also happen if a panic occurs after we uncurrent the context + // below but before Inner is constructed. let gl = ManuallyDrop::new(gl); context.unmake_current().map_err(|e| { crate::InstanceError::with_source( @@ -528,7 +540,7 @@ impl crate::Instance for Instance { inner: Arc::new(Mutex::new(Inner { device, gl, - context, + context: Some(context), })), srgb_capable, }) @@ -570,6 +582,43 @@ impl crate::Instance for Instance { } } +impl super::Adapter { + /// Creates a new external adapter using the specified loader function. + /// + /// # Safety + /// + /// - The underlying OpenGL ES context must be current. 
+ /// - The underlying OpenGL ES context must be current when interfacing with any objects returned by + /// wgpu-hal from this adapter. + /// - The underlying OpenGL ES context must be current when dropping this adapter and when + /// dropping any objects returned from this adapter. + pub unsafe fn new_external( + fun: impl FnMut(&str) -> *const c_void, + ) -> Option> { + let context = unsafe { glow::Context::from_loader_function(fun) }; + unsafe { + Self::expose(AdapterContext { + inner: Arc::new(Mutex::new(Inner { + gl: ManuallyDrop::new(context), + device: create_instance_device().ok()?, + context: None, + })), + }) + } + } + + pub fn adapter_context(&self) -> &AdapterContext { + &self.shared.context + } +} + +impl super::Device { + /// Returns the underlying WGL context. + pub fn context(&self) -> &AdapterContext { + &self.shared.context + } +} + struct DeviceContextHandle { device: Gdi::HDC, window: Foundation::HWND, diff --git a/third_party/rust/wgpu-hal/src/lib.rs b/third_party/rust/wgpu-hal/src/lib.rs index 92afa71d4347e..6578252c1ac14 100644 --- a/third_party/rust/wgpu-hal/src/lib.rs +++ b/third_party/rust/wgpu-hal/src/lib.rs @@ -51,8 +51,8 @@ //! must use [`CommandEncoder::transition_buffers`] between those two //! operations. //! -//! - Pipeline layouts are *explicitly specified* when setting bind -//! group. Incompatible layouts disturb groups bound at higher indices. +//! - Pipeline layouts are *explicitly specified* when setting bind groups. +//! Incompatible layouts disturb groups bound at higher indices. //! //! - The API *accepts collections as iterators*, to avoid forcing the user to //! store data in particular containers. 
The implementation doesn't guarantee @@ -327,7 +327,7 @@ impl Drop for DropGuard { } #[cfg(any(gles, vulkan))] -impl std::fmt::Debug for DropGuard { +impl fmt::Debug for DropGuard { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("DropGuard").finish() } @@ -345,6 +345,18 @@ pub enum DeviceError { Unexpected, } +#[allow(dead_code)] // may be unused on some platforms +#[cold] +fn hal_usage_error(txt: T) -> ! { + panic!("wgpu-hal invariant was violated (usage error): {txt}") +} + +#[allow(dead_code)] // may be unused on some platforms +#[cold] +fn hal_internal_error(txt: T) -> ! { + panic!("wgpu-hal ran into a preventable internal error: {txt}") +} + #[derive(Clone, Debug, Eq, PartialEq, Error)] pub enum ShaderError { #[error("Compilation failed: {0:?}")] @@ -711,6 +723,9 @@ pub trait Device: WasmNotSendSync { /// - The given `buffer` must not currently be mapped. unsafe fn destroy_buffer(&self, buffer: ::Buffer); + /// A hook for when a wgpu-core buffer is created from a raw wgpu-hal buffer. + unsafe fn add_raw_buffer(&self, buffer: &::Buffer); + /// Return a pointer to CPU memory mapping the contents of `buffer`. /// /// Buffer mappings are persistent: the buffer may remain mapped on the CPU @@ -802,6 +817,10 @@ pub trait Device: WasmNotSendSync { desc: &TextureDescriptor, ) -> Result<::Texture, DeviceError>; unsafe fn destroy_texture(&self, texture: ::Texture); + + /// A hook for when a wgpu-core texture is created from a raw wgpu-hal texture. + unsafe fn add_raw_texture(&self, texture: &::Texture); + unsafe fn create_texture_view( &self, texture: &::Texture, @@ -1230,8 +1249,40 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // pass common - /// Sets the bind group at `index` to `group`, assuming the layout - /// of all the preceding groups to be taken from `layout`. + /// Sets the bind group at `index` to `group`. 
+ /// + /// If this is not the first call to `set_bind_group` within the current + /// render or compute pass: + /// + /// - If `layout` contains `n` bind group layouts, then any previously set + /// bind groups at indices `n` or higher are cleared. + /// + /// - If the first `m` bind group layouts of `layout` are equal to those of + /// the previously passed layout, but no more, then any previously set + /// bind groups at indices `m` or higher are cleared. + /// + /// It follows from the above that passing the same layout as before doesn't + /// clear any bind groups. + /// + /// # Safety + /// + /// - This [`CommandEncoder`] must be within a render or compute pass. + /// + /// - `index` must be the valid index of some bind group layout in `layout`. + /// Call this the "relevant bind group layout". + /// + /// - The layout of `group` must be equal to the relevant bind group layout. + /// + /// - The length of `dynamic_offsets` must match the number of buffer + /// bindings [with dynamic offsets][hdo] in the relevant bind group + /// layout. + /// + /// - If those buffer bindings are ordered by increasing [`binding` number] + /// and paired with elements from `dynamic_offsets`, then each offset must + /// be a valid offset for the binding's corresponding buffer in `group`. + /// + /// [hdo]: wgt::BindingType::Buffer::has_dynamic_offset + /// [`binding` number]: wgt::BindGroupLayoutEntry::binding unsafe fn set_bind_group( &mut self, layout: &::PipelineLayout, @@ -1283,11 +1334,43 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // render passes - // Begins a render pass, clears all active bindings. + /// Begin a new render pass, clearing all active bindings. 
+ /// + /// This clears any bindings established by the following calls: + /// + /// - [`set_bind_group`](CommandEncoder::set_bind_group) + /// - [`set_push_constants`](CommandEncoder::set_push_constants) + /// - [`begin_query`](CommandEncoder::begin_query) + /// - [`set_render_pipeline`](CommandEncoder::set_render_pipeline) + /// - [`set_index_buffer`](CommandEncoder::set_index_buffer) + /// - [`set_vertex_buffer`](CommandEncoder::set_vertex_buffer) + /// + /// # Safety + /// + /// - All prior calls to [`begin_render_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_render_pass`]. + /// + /// - All prior calls to [`begin_compute_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_compute_pass`]. + /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn begin_render_pass( &mut self, desc: &RenderPassDescriptor<::QuerySet, ::TextureView>, ); + + /// End the current render pass. + /// + /// # Safety + /// + /// - There must have been a prior call to [`begin_render_pass`] on this [`CommandEncoder`] + /// that has not been followed by a call to [`end_render_pass`]. + /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass unsafe fn end_render_pass(&mut self); unsafe fn set_render_pipeline(&mut self, pipeline: &::RenderPipeline); @@ -1353,11 +1436,41 @@ pub trait CommandEncoder: WasmNotSendSync + fmt::Debug { // compute passes - // Begins a compute pass, clears all active bindings. + /// Begin a new compute pass, clearing all active bindings. 
+ /// + /// This clears any bindings established by the following calls: + /// + /// - [`set_bind_group`](CommandEncoder::set_bind_group) + /// - [`set_push_constants`](CommandEncoder::set_push_constants) + /// - [`begin_query`](CommandEncoder::begin_query) + /// - [`set_compute_pipeline`](CommandEncoder::set_compute_pipeline) + /// + /// # Safety + /// + /// - All prior calls to [`begin_render_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_render_pass`]. + /// + /// - All prior calls to [`begin_compute_pass`] on this [`CommandEncoder`] must have been followed + /// by a call to [`end_compute_pass`]. + /// + /// [`begin_render_pass`]: CommandEncoder::begin_render_pass + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_render_pass`]: CommandEncoder::end_render_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn begin_compute_pass( &mut self, desc: &ComputePassDescriptor<::QuerySet>, ); + + /// End the current compute pass. + /// + /// # Safety + /// + /// - There must have been a prior call to [`begin_compute_pass`] on this [`CommandEncoder`] + /// that has not been followed by a call to [`end_compute_pass`]. + /// + /// [`begin_compute_pass`]: CommandEncoder::begin_compute_pass + /// [`end_compute_pass`]: CommandEncoder::end_compute_pass unsafe fn end_compute_pass(&mut self); unsafe fn set_compute_pipeline(&mut self, pipeline: &::ComputePipeline); @@ -1635,9 +1748,27 @@ pub struct InstanceDescriptor<'a> { pub struct Alignments { /// The alignment of the start of the buffer used as a GPU copy source. pub buffer_copy_offset: wgt::BufferSize, + /// The alignment of the row pitch of the texture data stored in a buffer that is /// used in a GPU copy operation. pub buffer_copy_pitch: wgt::BufferSize, + + /// The finest alignment of bound range checking for uniform buffers. 
+ /// + /// When `wgpu_hal` restricts shader references to the [accessible + /// region][ar] of a [`Uniform`] buffer, the size of the accessible region + /// is the bind group binding's stated [size], rounded up to the next + /// multiple of this value. + /// + /// We don't need an analogous field for storage buffer bindings, because + /// all our backends promise to enforce the size at least to a four-byte + /// alignment, and `wgpu_hal` requires bound range lengths to be a multiple + /// of four anyway. + /// + /// [ar]: struct.BufferBinding.html#accessible-region + /// [`Uniform`]: wgt::BufferBindingType::Uniform + /// [size]: BufferBinding::size + pub uniform_bounds_check_alignment: wgt::BufferSize, } #[derive(Clone, Debug)] @@ -1807,6 +1938,40 @@ pub struct PipelineLayoutDescriptor<'a, B: DynBindGroupLayout + ?Sized> { pub push_constant_ranges: &'a [wgt::PushConstantRange], } +/// A region of a buffer made visible to shaders via a [`BindGroup`]. +/// +/// [`BindGroup`]: Api::BindGroup +/// +/// ## Accessible region +/// +/// `wgpu_hal` guarantees that shaders compiled with +/// [`ShaderModuleDescriptor::runtime_checks`] set to `true` cannot read or +/// write data via this binding outside the *accessible region* of [`buffer`]: +/// +/// - The accessible region starts at [`offset`]. +/// +/// - For [`Storage`] bindings, the size of the accessible region is [`size`], +/// which must be a multiple of 4. +/// +/// - For [`Uniform`] bindings, the size of the accessible region is [`size`] +/// rounded up to the next multiple of +/// [`Alignments::uniform_bounds_check_alignment`]. +/// +/// Note that this guarantee is stricter than WGSL's requirements for +/// [out-of-bounds accesses][woob], as WGSL allows them to return values from +/// elsewhere in the buffer. But this guarantee is necessary anyway, to permit +/// `wgpu-core` to avoid clearing uninitialized regions of buffers that will +/// never be read by the application before they are overwritten. 
This +/// optimization consults bind group buffer binding regions to determine which +/// parts of which buffers shaders might observe. This optimization is only +/// sound if shader access is bounds-checked. +/// +/// [`buffer`]: BufferBinding::buffer +/// [`offset`]: BufferBinding::offset +/// [`size`]: BufferBinding::size +/// [`Storage`]: wgt::BufferBindingType::Storage +/// [`Uniform`]: wgt::BufferBindingType::Uniform +/// [woob]: https://gpuweb.github.io/gpuweb/wgsl/#out-of-bounds-access-sec #[derive(Debug)] pub struct BufferBinding<'a, B: DynBuffer + ?Sized> { /// The buffer being bound. @@ -1925,6 +2090,26 @@ pub enum ShaderInput<'a> { pub struct ShaderModuleDescriptor<'a> { pub label: Label<'a>, + + /// Enforce bounds checks in shaders, even if the underlying driver doesn't + /// support doing so natively. + /// + /// When this is `true`, `wgpu_hal` promises that shaders can only read or + /// write the [accessible region][ar] of a bindgroup's buffer bindings. If + /// the underlying graphics platform cannot implement these bounds checks + /// itself, `wgpu_hal` will inject bounds checks before presenting the + /// shader to the platform. + /// + /// When this is `false`, `wgpu_hal` only enforces such bounds checks if the + /// underlying platform provides a way to do so itself. `wgpu_hal` does not + /// itself add any bounds checks to generated shader code. + /// + /// Note that `wgpu_hal` users may try to initialize only those portions of + /// buffers that they anticipate might be read from. Passing `false` here + /// may allow shaders to see wider regions of the buffers than expected, + /// making such deferred initialization visible to the application. 
+ /// + /// [ar]: struct.BufferBinding.html#accessible-region pub runtime_checks: bool, } diff --git a/third_party/rust/wgpu-hal/src/metal/adapter.rs b/third_party/rust/wgpu-hal/src/metal/adapter.rs index 5ef6d358b8423..e7db97a1f930c 100644 --- a/third_party/rust/wgpu-hal/src/metal/adapter.rs +++ b/third_party/rust/wgpu-hal/src/metal/adapter.rs @@ -178,7 +178,7 @@ impl crate::Adapter for super::Adapter { flags.set(Tfc::STORAGE, pc.format_rgb10a2_unorm_all); flags } - Tf::Rg11b10UFloat => { + Tf::Rg11b10Ufloat => { let mut flags = all_caps; flags.set(Tfc::STORAGE, pc.format_rg11b10_all); flags @@ -997,6 +997,10 @@ impl super::PrivateCapabilities { alignments: crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(self.buffer_alignment).unwrap(), buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(), + // This backend has Naga incorporate bounds checks into the + // Metal Shading Language it generates, so from `wgpu_hal`'s + // users' point of view, references are tightly checked. + uniform_bounds_check_alignment: wgt::BufferSize::new(1).unwrap(), }, downlevel, } @@ -1036,7 +1040,7 @@ impl super::PrivateCapabilities { Tf::Rgba8Sint => RGBA8Sint, Tf::Rgb10a2Uint => RGB10A2Uint, Tf::Rgb10a2Unorm => RGB10A2Unorm, - Tf::Rg11b10UFloat => RG11B10Float, + Tf::Rg11b10Ufloat => RG11B10Float, Tf::Rg32Uint => RG32Uint, Tf::Rg32Sint => RG32Sint, Tf::Rg32Float => RG32Float, diff --git a/third_party/rust/wgpu-hal/src/metal/device.rs b/third_party/rust/wgpu-hal/src/metal/device.rs index 077c10f517238..347a97a0868c5 100644 --- a/third_party/rust/wgpu-hal/src/metal/device.rs +++ b/third_party/rust/wgpu-hal/src/metal/device.rs @@ -356,6 +356,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -436,6 +440,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn 
add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, diff --git a/third_party/rust/wgpu-hal/src/metal/mod.rs b/third_party/rust/wgpu-hal/src/metal/mod.rs index 62d409a8ff66f..1935e843ec6e4 100644 --- a/third_party/rust/wgpu-hal/src/metal/mod.rs +++ b/third_party/rust/wgpu-hal/src/metal/mod.rs @@ -96,9 +96,7 @@ crate::impl_dyn_resource!( TextureView ); -pub struct Instance { - managed_metal_layer_delegate: surface::HalManagedMetalLayerDelegate, -} +pub struct Instance {} impl Instance { pub fn create_surface_from_layer(&self, layer: &metal::MetalLayerRef) -> Surface { @@ -113,9 +111,7 @@ impl crate::Instance for Instance { profiling::scope!("Init Metal Backend"); // We do not enable metal validation based on the validation flags as it affects the entire // process. Instead, we enable the validation inside the test harness itself in tests/src/native.rs. - Ok(Instance { - managed_metal_layer_delegate: surface::HalManagedMetalLayerDelegate::new(), - }) + Ok(Instance {}) } unsafe fn create_surface( @@ -126,16 +122,12 @@ impl crate::Instance for Instance { match window_handle { #[cfg(target_os = "ios")] raw_window_handle::RawWindowHandle::UiKit(handle) => { - let _ = &self.managed_metal_layer_delegate; - Ok(unsafe { Surface::from_view(handle.ui_view.as_ptr(), None) }) + Ok(unsafe { Surface::from_view(handle.ui_view.cast()) }) } #[cfg(target_os = "macos")] - raw_window_handle::RawWindowHandle::AppKit(handle) => Ok(unsafe { - Surface::from_view( - handle.ns_view.as_ptr(), - Some(&self.managed_metal_layer_delegate), - ) - }), + raw_window_handle::RawWindowHandle::AppKit(handle) => { + Ok(unsafe { Surface::from_view(handle.ns_view.cast()) }) + } _ => Err(crate::InstanceError::new(format!( "window handle {window_handle:?} is not a Metal-compatible handle" ))), @@ -367,7 +359,6 @@ pub struct Device { } pub struct Surface { - view: Option>, render_layer: Mutex, 
swapchain_format: RwLock>, extent: RwLock, diff --git a/third_party/rust/wgpu-hal/src/metal/surface.rs b/third_party/rust/wgpu-hal/src/metal/surface.rs index 115e4208a51a2..668b602474de3 100644 --- a/third_party/rust/wgpu-hal/src/metal/surface.rs +++ b/third_party/rust/wgpu-hal/src/metal/surface.rs @@ -1,26 +1,30 @@ #![allow(clippy::let_unit_value)] // `let () =` being used to constrain result type -use std::{os::raw::c_void, ptr::NonNull, sync::Once, thread}; +use std::ffi::c_uint; +use std::mem::ManuallyDrop; +use std::ptr::NonNull; +use std::sync::Once; +use std::thread; use core_graphics_types::{ base::CGFloat, geometry::{CGRect, CGSize}, }; +use metal::foreign_types::ForeignType; use objc::{ class, declare::ClassDecl, msg_send, - rc::autoreleasepool, + rc::{autoreleasepool, StrongPtr}, runtime::{Class, Object, Sel, BOOL, NO, YES}, sel, sel_impl, }; use parking_lot::{Mutex, RwLock}; -#[cfg(target_os = "macos")] #[link(name = "QuartzCore", kind = "framework")] extern "C" { #[allow(non_upper_case_globals)] - static kCAGravityTopLeft: *mut Object; + static kCAGravityResize: *mut Object; } extern "C" fn layer_should_inherit_contents_scale_from_window( @@ -46,6 +50,7 @@ impl HalManagedMetalLayerDelegate { type Fun = extern "C" fn(&Class, Sel, *mut Object, CGFloat, *mut Object) -> BOOL; let mut decl = ClassDecl::new(&class_name, class!(NSObject)).unwrap(); unsafe { + // decl.add_class_method::( sel!(layer:shouldInheritContentsScale:fromWindow:), layer_should_inherit_contents_scale_from_window, @@ -58,9 +63,8 @@ impl HalManagedMetalLayerDelegate { } impl super::Surface { - fn new(view: Option>, layer: metal::MetalLayer) -> Self { + fn new(layer: metal::MetalLayer) -> Self { Self { - view, render_layer: Mutex::new(layer), swapchain_format: RwLock::new(None), extent: RwLock::new(wgt::Extent3d::default()), @@ -71,86 +75,183 @@ impl super::Surface { /// If not called on the main thread, this will panic. 
#[allow(clippy::transmute_ptr_to_ref)] - pub unsafe fn from_view( - view: *mut c_void, - delegate: Option<&HalManagedMetalLayerDelegate>, - ) -> Self { - let view = view.cast::(); - let render_layer = { - let layer = unsafe { Self::get_metal_layer(view, delegate) }; - let layer = layer.cast::(); - // SAFETY: This pointer… - // - // - …is properly aligned. - // - …is dereferenceable to a `MetalLayerRef` as an invariant of the `metal` - // field. - // - …points to an _initialized_ `MetalLayerRef`. - // - …is only ever aliased via an immutable reference that lives within this - // lexical scope. - unsafe { &*layer } - } - .to_owned(); - let _: *mut c_void = msg_send![view, retain]; - Self::new(NonNull::new(view), render_layer) + pub unsafe fn from_view(view: NonNull) -> Self { + let layer = unsafe { Self::get_metal_layer(view) }; + let layer = ManuallyDrop::new(layer); + // SAFETY: The layer is an initialized instance of `CAMetalLayer`, and + // we transfer the retain count to `MetalLayer` using `ManuallyDrop`. + let layer = unsafe { metal::MetalLayer::from_ptr(layer.cast()) }; + Self::new(layer) } pub unsafe fn from_layer(layer: &metal::MetalLayerRef) -> Self { let class = class!(CAMetalLayer); let proper_kind: BOOL = msg_send![layer, isKindOfClass: class]; assert_eq!(proper_kind, YES); - Self::new(None, layer.to_owned()) + Self::new(layer.to_owned()) } - /// If not called on the main thread, this will panic. - pub(crate) unsafe fn get_metal_layer( - view: *mut Object, - delegate: Option<&HalManagedMetalLayerDelegate>, - ) -> *mut Object { - if view.is_null() { - panic!("window does not have a valid contentView"); - } - + /// Get or create a new `CAMetalLayer` associated with the given `NSView` + /// or `UIView`. + /// + /// # Panics + /// + /// If called from a thread that is not the main thread, this will panic. + /// + /// # Safety + /// + /// The `view` must be a valid instance of `NSView` or `UIView`. 
+ pub(crate) unsafe fn get_metal_layer(view: NonNull) -> StrongPtr { let is_main_thread: BOOL = msg_send![class!(NSThread), isMainThread]; if is_main_thread == NO { panic!("get_metal_layer cannot be called in non-ui thread."); } - let main_layer: *mut Object = msg_send![view, layer]; - let class = class!(CAMetalLayer); - let is_valid_layer: BOOL = msg_send![main_layer, isKindOfClass: class]; + // Ensure that the view is layer-backed. + // Views are always layer-backed in UIKit. + #[cfg(target_os = "macos")] + let () = msg_send![view.as_ptr(), setWantsLayer: YES]; - if is_valid_layer == YES { - main_layer + let root_layer: *mut Object = msg_send![view.as_ptr(), layer]; + // `-[NSView layer]` can return `NULL`, while `-[UIView layer]` should + // always be available. + assert!(!root_layer.is_null(), "failed making the view layer-backed"); + + // NOTE: We explicitly do not touch properties such as + // `layerContentsPlacement`, `needsDisplayOnBoundsChange` and + // `contentsGravity` etc. on the root layer, both since we would like + // to give the user full control over them, and because the default + // values suit us pretty well (especially the contents placement being + // `NSViewLayerContentsRedrawDuringViewResize`, which allows the view + // to receive `drawRect:`/`updateLayer` calls). + + let is_metal_layer: BOOL = msg_send![root_layer, isKindOfClass: class!(CAMetalLayer)]; + if is_metal_layer == YES { + // The view has a `CAMetalLayer` as the root layer, which can + // happen for example if user overwrote `-[NSView layerClass]` or + // the view is `MTKView`. + // + // This is easily handled: We take "ownership" over the layer, and + // render directly into that; after all, the user passed a view + // with an explicit Metal layer to us, so this is very likely what + // they expect us to do. + unsafe { StrongPtr::retain(root_layer) } } else { - // If the main layer is not a CAMetalLayer, we create a CAMetalLayer and use it. 
- let new_layer: *mut Object = msg_send![class, new]; - let frame: CGRect = msg_send![main_layer, bounds]; + // The view does not have a `CAMetalLayer` as the root layer (this + // is the default for most views). + // + // This case is trickier! We cannot use the existing layer with + // Metal, so we must do something else. There are a few options: + // + // 1. Panic here, and require the user to pass a view with a + // `CAMetalLayer` layer. + // + // While this would "work", it doesn't solve the problem, and + // instead passes the ball onwards to the user and ecosystem to + // figure it out. + // + // 2. Override the existing layer with a newly created layer. + // + // If we overlook that this does not work in UIKit since + // `UIView`'s `layer` is `readonly`, and that as such we will + // need to do something different there anyhow, this is + // actually a fairly good solution, and was what the original + // implementation did. + // + // It has some problems though, due to: + // + // a. `wgpu` in our API design choosing not to register a + // callback with `-[CALayerDelegate displayLayer:]`, but + // instead leaves it up to the user to figure out when to + // redraw. That is, we rely on other libraries' callbacks + // telling us when to render. + // + // (If this were an API only for Metal, we would probably + // make the user provide a `render` closure that we'd call + // in the right situations. But alas, we have to be + // cross-platform here). + // + // b. Overwriting the `layer` on `NSView` makes the view + // "layer-hosting", see [wantsLayer], which disables drawing + // functionality on the view like `drawRect:`/`updateLayer`. + // + // These two in combination makes it basically impossible for + // crates like Winit to provide a robust rendering callback + // that integrates with the system's built-in mechanisms for + // redrawing, exactly because overwriting the layer would be + // implicitly disabling those mechanisms! 
+ // + // [wantsLayer]: https://developer.apple.com/documentation/appkit/nsview/1483695-wantslayer?language=objc + // + // 3. Create a sublayer. + // + // `CALayer` has the concept of "sublayers", which we can use + // instead of overriding the layer. + // + // This is also the recommended solution on UIKit, so it's nice + // that we can use (almost) the same implementation for these. + // + // It _might_, however, perform ever so slightly worse than + // overriding the layer directly. + // + // 4. Create a new `MTKView` (or a custom view), and add it as a + // subview. + // + // Similar to creating a sublayer (see above), but also + // provides a bunch of event handling that we don't need. + // + // Option 3 seems like the most robust solution, so this is what + // we're going to do. + + // Create a new sublayer. + let new_layer: *mut Object = msg_send![class!(CAMetalLayer), new]; + let () = msg_send![root_layer, addSublayer: new_layer]; + + // Automatically resize the sublayer's frame to match the + // superlayer's bounds. + // + // Note that there is a somewhat hidden design decision in this: + // We define the `width` and `height` in `configure` to control + // the `drawableSize` of the layer, while `bounds` and `frame` are + // outside of the user's direct control - instead, though, they + // can control the size of the view (or root layer), and get the + // desired effect that way. + // + // We _could_ also let `configure` set the `bounds` size, however + // that would be inconsistent with using the root layer directly + // (as we may do, see above). + let width_sizable = 1 << 1; // kCALayerWidthSizable + let height_sizable = 1 << 4; // kCALayerHeightSizable + let mask: c_uint = width_sizable | height_sizable; + let () = msg_send![new_layer, setAutoresizingMask: mask]; + + // Specify the relative size that the auto resizing mask above + // will keep (i.e. tell it to fill out its superlayer). 
+ let frame: CGRect = msg_send![root_layer, bounds]; let () = msg_send![new_layer, setFrame: frame]; - #[cfg(target_os = "ios")] - { - // Unlike NSView, UIView does not allow to replace main layer. - let () = msg_send![main_layer, addSublayer: new_layer]; - // On iOS, "from_view" may be called before the application initialization is complete, - // `msg_send![view, window]` and `msg_send![window, screen]` will get null. - let screen: *mut Object = msg_send![class!(UIScreen), mainScreen]; - let scale_factor: CGFloat = msg_send![screen, nativeScale]; - let () = msg_send![view, setContentScaleFactor: scale_factor]; - }; - #[cfg(target_os = "macos")] - { - let () = msg_send![view, setLayer: new_layer]; - let () = msg_send![view, setWantsLayer: YES]; - let () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityTopLeft }]; - let window: *mut Object = msg_send![view, window]; - if !window.is_null() { - let scale_factor: CGFloat = msg_send![window, backingScaleFactor]; - let () = msg_send![new_layer, setContentsScale: scale_factor]; - } - }; - if let Some(delegate) = delegate { - let () = msg_send![new_layer, setDelegate: delegate.0]; - } - new_layer + + // The gravity to use when the layer's `drawableSize` isn't the + // same as the bounds rectangle. + // + // The desired content gravity is `kCAGravityResize`, because it + // masks / alleviates issues with resizing when + // `present_with_transaction` is disabled, and behaves better when + // moving the window between monitors. + // + // Unfortunately, it also makes it harder to see changes to + // `width` and `height` in `configure`. When debugging resize + // issues, swap this for `kCAGravityTopLeft` instead. + let _: () = msg_send![new_layer, setContentsGravity: unsafe { kCAGravityResize }]; + + // Set initial scale factor of the layer. This is kept in sync by + // `configure` (on UIKit), and the delegate below (on AppKit). 
+ let scale_factor: CGFloat = msg_send![root_layer, contentsScale]; + let () = msg_send![new_layer, setContentsScale: scale_factor]; + + let delegate = HalManagedMetalLayerDelegate::new(); + let () = msg_send![new_layer, setDelegate: delegate.0]; + + unsafe { StrongPtr::new(new_layer) } } } @@ -171,16 +272,6 @@ impl super::Surface { } } -impl Drop for super::Surface { - fn drop(&mut self) { - if let Some(view) = self.view { - unsafe { - let () = msg_send![view.as_ptr(), release]; - } - } - } -} - impl crate::Surface for super::Surface { type A = super::Api; @@ -210,19 +301,30 @@ impl crate::Surface for super::Surface { _ => (), } - let device_raw = device.shared.device.lock(); - // On iOS, unless the user supplies a view with a CAMetalLayer, we - // create one as a sublayer. However, when the view changes size, - // its sublayers are not automatically resized, and we must resize - // it here. The drawable size and the layer size don't correlate - #[cfg(target_os = "ios")] + // AppKit / UIKit automatically sets the correct scale factor for + // layers attached to a view. Our layer, however, may not be directly + // attached to a view; in those cases, we need to set the scale + // factor ourselves. + // + // For AppKit, we do so by adding a delegate on the layer with the + // `layer:shouldInheritContentsScale:fromWindow:` method returning + // `true` - this tells the system to automatically update the scale + // factor when it changes. + // + // For UIKit, we manually update the scale factor from the super layer + // here, if there is one. + // + // TODO: Is there a way that we could listen to such changes instead? 
+ #[cfg(not(target_os = "macos"))] { - if let Some(view) = self.view { - let main_layer: *mut Object = msg_send![view.as_ptr(), layer]; - let bounds: CGRect = msg_send![main_layer, bounds]; - let () = msg_send![*render_layer, setFrame: bounds]; + let superlayer: *mut Object = msg_send![render_layer.as_ptr(), superlayer]; + if !superlayer.is_null() { + let scale_factor: CGFloat = msg_send![superlayer, contentsScale]; + let () = msg_send![render_layer.as_ptr(), setContentsScale: scale_factor]; } } + + let device_raw = device.shared.device.lock(); render_layer.set_device(&device_raw); render_layer.set_pixel_format(caps.map_format(config.format)); render_layer.set_framebuffer_only(framebuffer_only); diff --git a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs index 25d0d750494b5..ab6ae02c6fbf3 100644 --- a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs +++ b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs @@ -342,9 +342,6 @@ impl PhysicalDeviceFeatures { None }, robustness2: if enabled_extensions.contains(&ext::robustness2::NAME) { - // Note: enabling `robust_buffer_access2` isn't requires, strictly speaking - // since we can enable `robust_buffer_access` all the time. But it improves - // program portability, so we opt into it if they are supported. Some( vk::PhysicalDeviceRobustness2FeaturesEXT::default() .robust_buffer_access2(private_caps.robust_buffer_access2) @@ -842,6 +839,10 @@ pub struct PhysicalDeviceProperties { /// `VK_EXT_subgroup_size_control` extension, promoted to Vulkan 1.3. subgroup_size_control: Option>, + /// Additional `vk::PhysicalDevice` properties from the + /// `VK_EXT_robustness2` extension. + robustness2: Option>, + /// The device API version. /// /// Which is the version of Vulkan supported for device-level functionality. 
@@ -1097,13 +1098,38 @@ impl PhysicalDeviceProperties { } } - fn to_hal_alignments(&self) -> crate::Alignments { + /// Return a `wgpu_hal::Alignments` structure describing this adapter. + /// + /// The `using_robustness2` argument says how this adapter will implement + /// `wgpu_hal`'s guarantee that shaders can only read the [accessible + /// region][ar] of bindgroup's buffer bindings: + /// + /// - If this adapter will depend on `VK_EXT_robustness2`'s + /// `robustBufferAccess2` feature to apply bounds checks to shader buffer + /// access, `using_robustness2` must be `true`. + /// + /// - Otherwise, this adapter must use Naga to inject bounds checks on + /// buffer accesses, and `using_robustness2` must be `false`. + /// + /// [ar]: ../../struct.BufferBinding.html#accessible-region + fn to_hal_alignments(&self, using_robustness2: bool) -> crate::Alignments { let limits = &self.properties.limits; crate::Alignments { buffer_copy_offset: wgt::BufferSize::new(limits.optimal_buffer_copy_offset_alignment) .unwrap(), buffer_copy_pitch: wgt::BufferSize::new(limits.optimal_buffer_copy_row_pitch_alignment) .unwrap(), + uniform_bounds_check_alignment: { + let alignment = if using_robustness2 { + self.robustness2 + .unwrap() // if we're using it, we should have its properties + .robust_uniform_buffer_access_size_alignment + } else { + // If the `robustness2` properties are unavailable, then `robustness2` is not available either. Naga-injected bounds checks are precise. 
+ 1 + }; + wgt::BufferSize::new(alignment).unwrap() + }, } } } @@ -1133,6 +1159,7 @@ impl super::InstanceShared { let supports_subgroup_size_control = capabilities.device_api_version >= vk::API_VERSION_1_3 || capabilities.supports_extension(ext::subgroup_size_control::NAME); + let supports_robustness2 = capabilities.supports_extension(ext::robustness2::NAME); let supports_acceleration_structure = capabilities.supports_extension(khr::acceleration_structure::NAME); @@ -1180,6 +1207,13 @@ impl super::InstanceShared { properties2 = properties2.push_next(next); } + if supports_robustness2 { + let next = capabilities + .robustness2 + .insert(vk::PhysicalDeviceRobustness2PropertiesEXT::default()); + properties2 = properties2.push_next(next); + } + unsafe { get_device_properties.get_physical_device_properties2(phd, &mut properties2) }; @@ -1191,6 +1225,7 @@ impl super::InstanceShared { capabilities .supported_extensions .retain(|&x| x.extension_name_as_c_str() != Ok(ext::robustness2::NAME)); + capabilities.robustness2 = None; } }; capabilities @@ -1507,7 +1542,7 @@ impl super::Instance { }; let capabilities = crate::Capabilities { limits: phd_capabilities.to_wgpu_limits(), - alignments: phd_capabilities.to_hal_alignments(), + alignments: phd_capabilities.to_hal_alignments(private_caps.robust_buffer_access2), downlevel: wgt::DownlevelCapabilities { flags: downlevel_flags, limits: wgt::DownlevelLimits {}, @@ -1779,7 +1814,7 @@ impl super::Adapter { capabilities: Some(capabilities.iter().cloned().collect()), bounds_check_policies: naga::proc::BoundsCheckPolicies { index: naga::proc::BoundsCheckPolicy::Restrict, - buffer: if self.private_caps.robust_buffer_access { + buffer: if self.private_caps.robust_buffer_access2 { naga::proc::BoundsCheckPolicy::Unchecked } else { naga::proc::BoundsCheckPolicy::Restrict @@ -2010,7 +2045,7 @@ impl crate::Adapter for super::Adapter { vk::Result::ERROR_TOO_MANY_OBJECTS => crate::DeviceError::OutOfMemory, 
vk::Result::ERROR_INITIALIZATION_FAILED => crate::DeviceError::Lost, vk::Result::ERROR_EXTENSION_NOT_PRESENT | vk::Result::ERROR_FEATURE_NOT_PRESENT => { - super::hal_usage_error(err) + crate::hal_usage_error(err) } other => super::map_host_device_oom_and_lost_err(other), } diff --git a/third_party/rust/wgpu-hal/src/vulkan/conv.rs b/third_party/rust/wgpu-hal/src/vulkan/conv.rs index 38642ba082079..b829307068e6a 100644 --- a/third_party/rust/wgpu-hal/src/vulkan/conv.rs +++ b/third_party/rust/wgpu-hal/src/vulkan/conv.rs @@ -36,7 +36,7 @@ impl super::PrivateCapabilities { Tf::Rgba8Sint => F::R8G8B8A8_SINT, Tf::Rgb10a2Uint => F::A2B10G10R10_UINT_PACK32, Tf::Rgb10a2Unorm => F::A2B10G10R10_UNORM_PACK32, - Tf::Rg11b10UFloat => F::B10G11R11_UFLOAT_PACK32, + Tf::Rg11b10Ufloat => F::B10G11R11_UFLOAT_PACK32, Tf::Rg32Uint => F::R32G32_UINT, Tf::Rg32Sint => F::R32G32_SINT, Tf::Rg32Float => F::R32G32_SFLOAT, diff --git a/third_party/rust/wgpu-hal/src/vulkan/device.rs b/third_party/rust/wgpu-hal/src/vulkan/device.rs index 4283f844a999d..181da3d8871fb 100644 --- a/third_party/rust/wgpu-hal/src/vulkan/device.rs +++ b/third_party/rust/wgpu-hal/src/vulkan/device.rs @@ -15,6 +15,13 @@ use std::{ }; impl super::DeviceShared { + /// Set the name of `object` to `name`. + /// + /// If `name` contains an interior null byte, then the name set will be truncated to that byte. 
+ /// + /// # Safety + /// + /// It must be valid to set `object`'s debug name pub(super) unsafe fn set_object_name(&self, object: impl vk::Handle, name: &str) { let Some(extension) = self.extension_fns.debug_utils.as_ref() else { return; @@ -44,7 +51,7 @@ impl super::DeviceShared { &buffer_vec }; - let name = unsafe { CStr::from_bytes_with_nul_unchecked(name_bytes) }; + let name = CStr::from_bytes_until_nul(name_bytes).expect("We have added a null byte"); let _result = unsafe { extension.set_debug_utils_object_name( @@ -956,6 +963,10 @@ impl crate::Device for super::Device { self.counters.buffers.sub(1); } + unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) { + self.counters.buffers.add(1); + } + unsafe fn map_buffer( &self, buffer: &super::Buffer, @@ -970,14 +981,14 @@ impl crate::Device for super::Device { .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT); Ok(crate::BufferMapping { ptr, is_coherent }) } else { - super::hal_usage_error("tried to map external buffer") + crate::hal_usage_error("tried to map external buffer") } } unsafe fn unmap_buffer(&self, buffer: &super::Buffer) { if let Some(ref block) = buffer.block { unsafe { block.lock().unmap(&*self.shared) }; } else { - super::hal_usage_error("tried to unmap external buffer") + crate::hal_usage_error("tried to unmap external buffer") } } @@ -1127,6 +1138,10 @@ impl crate::Device for super::Device { self.counters.textures.sub(1); } + unsafe fn add_raw_texture(&self, _texture: &super::Texture) { + self.counters.textures.add(1); + } + unsafe fn create_texture_view( &self, texture: &super::Texture, @@ -2520,7 +2535,7 @@ impl super::DeviceShared { } } None => { - super::hal_usage_error(format!( + crate::hal_usage_error(format!( "no signals reached value {}", wait_value )); @@ -2537,7 +2552,7 @@ impl From for crate::DeviceError { use gpu_alloc::AllocationError as Ae; match error { Ae::OutOfDeviceMemory | Ae::OutOfHostMemory | Ae::TooManyObjects => Self::OutOfMemory, - Ae::NoCompatibleMemoryTypes => 
super::hal_usage_error(error), + Ae::NoCompatibleMemoryTypes => crate::hal_usage_error(error), } } } @@ -2546,7 +2561,7 @@ impl From for crate::DeviceError { use gpu_alloc::MapError as Me; match error { Me::OutOfDeviceMemory | Me::OutOfHostMemory | Me::MapFailed => Self::OutOfMemory, - Me::NonHostVisible | Me::AlreadyMapped => super::hal_usage_error(error), + Me::NonHostVisible | Me::AlreadyMapped => crate::hal_usage_error(error), } } } diff --git a/third_party/rust/wgpu-hal/src/vulkan/instance.rs b/third_party/rust/wgpu-hal/src/vulkan/instance.rs index 124fd0abcce37..5673859e451e4 100644 --- a/third_party/rust/wgpu-hal/src/vulkan/instance.rs +++ b/third_party/rust/wgpu-hal/src/vulkan/instance.rs @@ -514,7 +514,7 @@ impl super::Instance { #[cfg(metal)] fn create_surface_from_view( &self, - view: *mut c_void, + view: std::ptr::NonNull, ) -> Result { if !self.shared.extensions.contains(&ext::metal_surface::NAME) { return Err(crate::InstanceError::new(String::from( @@ -522,16 +522,17 @@ impl super::Instance { ))); } - let layer = unsafe { - crate::metal::Surface::get_metal_layer(view.cast::(), None) - }; + let layer = unsafe { crate::metal::Surface::get_metal_layer(view.cast()) }; + // NOTE: The layer is retained by Vulkan's `vkCreateMetalSurfaceEXT`, + // so no need to retain it beyond the scope of this function. 
+ let layer_ptr = (*layer).cast(); let surface = { let metal_loader = ext::metal_surface::Instance::new(&self.shared.entry, &self.shared.raw); let vk_info = vk::MetalSurfaceCreateInfoEXT::default() .flags(vk::MetalSurfaceCreateFlagsEXT::empty()) - .layer(layer.cast()); + .layer(layer_ptr); unsafe { metal_loader.create_metal_surface(&vk_info, None).unwrap() } }; @@ -554,10 +555,9 @@ impl Drop for super::InstanceShared { fn drop(&mut self) { unsafe { // Keep du alive since destroy_instance may also log - let _du = self.debug_utils.take().map(|du| { + let _du = self.debug_utils.take().inspect(|du| { du.extension .destroy_debug_utils_messenger(du.messenger, None); - du }); if self.drop_guard.is_none() { self.raw.destroy_instance(None); @@ -874,13 +874,13 @@ impl crate::Instance for super::Instance { (Rwh::AppKit(handle), _) if self.shared.extensions.contains(&ext::metal_surface::NAME) => { - self.create_surface_from_view(handle.ns_view.as_ptr()) + self.create_surface_from_view(handle.ns_view) } #[cfg(all(target_os = "ios", feature = "metal"))] (Rwh::UiKit(handle), _) if self.shared.extensions.contains(&ext::metal_surface::NAME) => { - self.create_surface_from_view(handle.ui_view.as_ptr()) + self.create_surface_from_view(handle.ui_view) } (_, _) => Err(crate::InstanceError::new(format!( "window handle {window_handle:?} is not a Vulkan-compatible handle" diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs index 0ee90c4590e5a..843b836f46af2 100644 --- a/third_party/rust/wgpu-hal/src/vulkan/mod.rs +++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs @@ -493,9 +493,36 @@ struct PrivateCapabilities { /// Ability to present contents to any screen. Only needed to work around broken platform configurations. can_present: bool, non_coherent_map_mask: wgt::BufferAddress, + + /// True if this adapter advertises the [`robustBufferAccess`][vrba] feature. 
+ /// + /// Note that Vulkan's `robustBufferAccess` is not sufficient to implement + /// `wgpu_hal`'s guarantee that shaders will not access buffer contents via + /// a given bindgroup binding outside that binding's [accessible + /// region][ar]. Enabling `robustBufferAccess` does ensure that + /// out-of-bounds reads and writes are not undefined behavior (that's good), + /// but still permits out-of-bounds reads to return data from anywhere + /// within the buffer, not just the accessible region. + /// + /// [ar]: ../struct.BufferBinding.html#accessible-region + /// [vrba]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#features-robustBufferAccess robust_buffer_access: bool, + robust_image_access: bool, + + /// True if this adapter supports the [`VK_EXT_robustness2`] extension's + /// [`robustBufferAccess2`] feature. + /// + /// This is sufficient to implement `wgpu_hal`'s [required bounds-checking][ar] of + /// shader accesses to buffer contents. If this feature is not available, + /// this backend must have Naga inject bounds checks in the generated + /// SPIR-V. + /// + /// [`VK_EXT_robustness2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_robustness2.html + /// [`robustBufferAccess2`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPhysicalDeviceRobustness2FeaturesEXT.html#features-robustBufferAccess2 + /// [ar]: ../struct.BufferBinding.html#accessible-region robust_buffer_access2: bool, + robust_image_access2: bool, zero_initialize_workgroup_memory: bool, image_format_list: bool, @@ -1360,8 +1387,3 @@ fn get_lost_err() -> crate::DeviceError { #[allow(unreachable_code)] crate::DeviceError::Lost } - -#[cold] -fn hal_usage_error(txt: T) -> ! 
{ - panic!("wgpu-hal invariant was violated (usage error): {txt}") -} diff --git a/third_party/rust/wgpu-types/.cargo-checksum.json b/third_party/rust/wgpu-types/.cargo-checksum.json index ef632d7a6a2f7..a9dcc84b76fad 100644 --- a/third_party/rust/wgpu-types/.cargo-checksum.json +++ b/third_party/rust/wgpu-types/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"9c7e14dbc8741dddf1ab54efa393498611e2cfc9c79343937021a4a2a68a032e","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"d22aa7ddb95d3ae129ff9848fd45913bab41bd354b6f1c1f6c38a86f9d1abbc6","src/counters.rs":"599e9c033c4f8fcc95113bf730191d80f51b3276b3ee8fd03bbc1c27d24bf76d","src/lib.rs":"f3434598049500d0bc2c1d1e047de091c6d8728d5d0ecae22651f97889f5cce0","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file +{"files":{"Cargo.toml":"32a781ba9b4fb68f6854c80c3d307bdd720de34bc8fe17aef73cf8f821f18098","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"d22aa7ddb95d3ae129ff9848fd45913bab41bd354b6f1c1f6c38a86f9d1abbc6","src/counters.rs":"599e9c033c4f8fcc95113bf730191d80f51b3276b3ee8fd03bbc1c27d24bf76d","src/lib.rs":"e4c3a736e22002ea6d5c4e0aceeb1b5cdf269b742fbfa06b88e369b5c90b20c0","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file diff --git a/third_party/rust/wgpu-types/Cargo.toml b/third_party/rust/wgpu-types/Cargo.toml index 9f5ced5351783..dcbef449ce2e7 100644 --- a/third_party/rust/wgpu-types/Cargo.toml +++ b/third_party/rust/wgpu-types/Cargo.toml @@ -57,7 +57,7 @@ version = "1" features = ["derive"] [dev-dependencies.serde_json] -version = "1.0.125" +version = "1.0.128" [features] counters = [] diff 
--git a/third_party/rust/wgpu-types/src/lib.rs b/third_party/rust/wgpu-types/src/lib.rs index cd2f5dcd0a045..e3cab5605c3fa 100644 --- a/third_party/rust/wgpu-types/src/lib.rs +++ b/third_party/rust/wgpu-types/src/lib.rs @@ -13,6 +13,7 @@ use serde::Deserialize; #[cfg(any(feature = "serde", test))] use serde::Serialize; use std::hash::{Hash, Hasher}; +use std::mem::size_of; use std::path::PathBuf; use std::{num::NonZeroU32, ops::Range}; @@ -401,7 +402,7 @@ bitflags::bitflags! { const SHADER_F16 = 1 << 8; - /// Allows for usage of textures of format [`TextureFormat::Rg11b10UFloat`] as a render target + /// Allows for usage of textures of format [`TextureFormat::Rg11b10Ufloat`] as a render target /// /// Supported platforms: /// - Vulkan @@ -2553,7 +2554,7 @@ pub enum TextureFormat { /// Red, green, blue, and alpha channels. 10 bit integer for RGB channels, 2 bit integer for alpha channel. [0, 1023] ([0, 3] for alpha) converted to/from float [0, 1] in shader. Rgb10a2Unorm, /// Red, green, and blue channels. 11 bit float with no sign bit for RG channels. 10 bit float with no sign bit for blue channel. Float in shader. - Rg11b10UFloat, + Rg11b10Ufloat, // Normal 64 bit formats /// Red and green channels. 32 bit integer per channel. Unsigned in shader. 
@@ -2841,7 +2842,7 @@ impl<'de> Deserialize<'de> for TextureFormat { "bgra8unorm-srgb" => TextureFormat::Bgra8UnormSrgb, "rgb10a2uint" => TextureFormat::Rgb10a2Uint, "rgb10a2unorm" => TextureFormat::Rgb10a2Unorm, - "rg11b10ufloat" => TextureFormat::Rg11b10UFloat, + "rg11b10ufloat" => TextureFormat::Rg11b10Ufloat, "rg32uint" => TextureFormat::Rg32Uint, "rg32sint" => TextureFormat::Rg32Sint, "rg32float" => TextureFormat::Rg32Float, @@ -2969,7 +2970,7 @@ impl Serialize for TextureFormat { TextureFormat::Bgra8UnormSrgb => "bgra8unorm-srgb", TextureFormat::Rgb10a2Uint => "rgb10a2uint", TextureFormat::Rgb10a2Unorm => "rgb10a2unorm", - TextureFormat::Rg11b10UFloat => "rg11b10ufloat", + TextureFormat::Rg11b10Ufloat => "rg11b10ufloat", TextureFormat::Rg32Uint => "rg32uint", TextureFormat::Rg32Sint => "rg32sint", TextureFormat::Rg32Float => "rg32float", @@ -3211,7 +3212,7 @@ impl TextureFormat { | Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat + | Self::Rg11b10Ufloat | Self::Rg32Uint | Self::Rg32Sint | Self::Rg32Float @@ -3319,7 +3320,7 @@ impl TextureFormat { | Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat + | Self::Rg11b10Ufloat | Self::Rg32Uint | Self::Rg32Sint | Self::Rg32Float @@ -3438,7 +3439,7 @@ impl TextureFormat { Self::Bgra8UnormSrgb => (msaa_resolve, attachment), Self::Rgb10a2Uint => ( msaa, attachment), Self::Rgb10a2Unorm => (msaa_resolve, attachment), - Self::Rg11b10UFloat => ( msaa, rg11b10f), + Self::Rg11b10Ufloat => ( msaa, rg11b10f), Self::Rg32Uint => ( noaa, all_flags), Self::Rg32Sint => ( noaa, all_flags), Self::Rg32Float => ( noaa, all_flags), @@ -3549,7 +3550,7 @@ impl TextureFormat { | Self::Rg16Float | Self::Rgba16Float | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(float), + | Self::Rg11b10Ufloat => Some(float), Self::R32Float | Self::Rg32Float | Self::Rgba32Float => Some(float32_sample_type), @@ -3681,7 +3682,7 @@ impl TextureFormat { | Self::Rg16Sint | Self::Rg16Float 
=> Some(4), Self::R32Uint | Self::R32Sint | Self::R32Float => Some(4), - Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm | Self::Rg11b10UFloat => { + Self::Rgb9e5Ufloat | Self::Rgb10a2Uint | Self::Rgb10a2Unorm | Self::Rg11b10Ufloat => { Some(4) } @@ -3784,7 +3785,7 @@ impl TextureFormat { | Self::Rg32Float | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(8), + | Self::Rg11b10Ufloat => Some(8), Self::Rgba32Uint | Self::Rgba32Sint | Self::Rgba32Float => Some(16), Self::Stencil8 | Self::Depth16Unorm @@ -3867,7 +3868,7 @@ impl TextureFormat { | Self::Rgba32Float | Self::Rgb10a2Uint | Self::Rgb10a2Unorm - | Self::Rg11b10UFloat => Some(4), + | Self::Rg11b10Ufloat => Some(4), Self::Stencil8 | Self::Depth16Unorm | Self::Depth24Plus @@ -3958,7 +3959,7 @@ impl TextureFormat { | Self::Rgba32Sint | Self::Rgba32Float => 4, - Self::Rgb9e5Ufloat | Self::Rg11b10UFloat => 3, + Self::Rgb9e5Ufloat | Self::Rg11b10Ufloat => 3, Self::Rgb10a2Uint | Self::Rgb10a2Unorm => 4, Self::Stencil8 | Self::Depth16Unorm | Self::Depth24Plus | Self::Depth32Float => 1, @@ -4176,7 +4177,7 @@ fn texture_format_serialize() { "\"rgb10a2unorm\"".to_string() ); assert_eq!( - serde_json::to_string(&TextureFormat::Rg11b10UFloat).unwrap(), + serde_json::to_string(&TextureFormat::Rg11b10Ufloat).unwrap(), "\"rg11b10ufloat\"".to_string() ); assert_eq!( @@ -4473,7 +4474,7 @@ fn texture_format_deserialize() { ); assert_eq!( serde_json::from_str::("\"rg11b10ufloat\"").unwrap(), - TextureFormat::Rg11b10UFloat + TextureFormat::Rg11b10Ufloat ); assert_eq!( serde_json::from_str::("\"rg32uint\"").unwrap(), @@ -5546,8 +5547,18 @@ pub struct SurfaceConfiguration { /// `Bgra8Unorm` and `Bgra8UnormSrgb` pub format: TextureFormat, /// Width of the swap chain. Must be the same size as the surface, and nonzero. + /// + /// If this is not the same size as the underlying surface (e.g. 
if it is + /// set once, and the window is later resized), the behaviour is defined + /// but platform-specific, and may change in the future (currently macOS + /// scales the surface, other platforms may do something else). pub width: u32, /// Height of the swap chain. Must be the same size as the surface, and nonzero. + /// + /// If this is not the same size as the underlying surface (e.g. if it is + /// set once, and the window is later resized), the behaviour is defined + /// but platform-specific, and may change in the future (currently macOS + /// scales the surface, other platforms may do something else). pub height: u32, /// Presentation mode of the swap chain. Fifo is the only mode guaranteed to be supported. /// FifoRelaxed, Immediate, and Mailbox will crash if unsupported, while AutoVsync and @@ -7228,7 +7239,7 @@ impl DrawIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7259,7 +7270,7 @@ impl DrawIndexedIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7284,7 +7295,7 @@ impl DispatchIndirectArgs { unsafe { std::mem::transmute(std::slice::from_raw_parts( std::ptr::from_ref(self).cast::(), - std::mem::size_of::(), + size_of::(), )) } } @@ -7309,8 +7320,15 @@ impl ShaderBoundChecks { /// Creates a new configuration where the shader isn't bound checked. /// /// # Safety - /// The caller MUST ensure that all shaders built with this configuration don't perform any - /// out of bounds reads or writes. + /// + /// The caller MUST ensure that all shaders built with this configuration + /// don't perform any out of bounds reads or writes. 
+ /// + /// Note that `wgpu_core`, in particular, initializes only those portions of + /// buffers that it expects might be read, and it does not expect contents + /// outside the ranges bound in bindgroups to be accessible, so using this + /// configuration with ill-behaved shaders could expose uninitialized GPU + /// memory contents to the application. #[must_use] pub unsafe fn unchecked() -> Self { ShaderBoundChecks { @@ -7333,10 +7351,6 @@ impl Default for ShaderBoundChecks { /// Selects which DX12 shader compiler to use. /// -/// If the `wgpu-hal/dx12-shader-compiler` feature isn't enabled then this will fall back -/// to the Fxc compiler at runtime and log an error. -/// This feature is always enabled when using `wgpu`. -/// /// If the `Dxc` option is selected, but `dxcompiler.dll` and `dxil.dll` files aren't found, /// then this will fall back to the Fxc compiler at runtime and log an error. /// @@ -7353,6 +7367,10 @@ pub enum Dx12Compiler { /// /// However, it requires both `dxcompiler.dll` and `dxil.dll` to be shipped with the application. /// These files can be downloaded from . + /// + /// Minimum supported version: [v1.5.2010](https://github.com/microsoft/DirectXShaderCompiler/releases/tag/v1.5.2010) + /// + /// It also requires WDDM 2.1 (Windows 10 version 1607). Dxc { /// Path to the `dxil.dll` file, or path to the directory containing `dxil.dll` file. Passing `None` will use standard platform specific dll loading rules. dxil_path: Option, @@ -7532,10 +7550,4 @@ pub enum DeviceLostReason { /// exactly once before it is dropped, which helps with managing the /// memory owned by the callback. ReplacedCallback = 3, - /// When setting the callback, but the device is already invalid - /// - /// As above, when the callback is provided, wgpu guarantees that it - /// will eventually be called. If the device is already invalid, wgpu - /// will call the callback immediately, with this reason. - DeviceInvalid = 4, }