diff --git a/docs/backends.rst b/docs/backends.rst index 8b985140..93a2d4fd 100644 --- a/docs/backends.rst +++ b/docs/backends.rst @@ -44,7 +44,7 @@ It also works out of the box, because the wgpu-native DLL is shipped with wgpu-p The wgpu_native backend provides a few extra functionalities: -.. py:function:: wgpu.backends.wgpu_native.request_device(adapter, trace_path, *, label="", required_features, required_limits, default_queue) +.. py:function:: wgpu.backends.wgpu_native.request_device_sync(adapter, trace_path, *, label="", required_features, required_limits, default_queue) An alternative to :func:`wgpu.GPUAdapter.request_adapter`, that streams a trace of all low level calls to disk, so the visualization can be replayed (also on other systems), @@ -88,7 +88,7 @@ You must tell the adapter to create a device that supports push constants, and you must tell it the number of bytes of push constants that you are using. Overestimating is okay:: - device = adapter.request_device( + device = adapter.request_device_sync( required_features=["push-constants"], required_limits={"max-push-constant-size": 256}, ) diff --git a/docs/guide.rst b/docs/guide.rst index 118443ba..5f03221b 100644 --- a/docs/guide.rst +++ b/docs/guide.rst @@ -43,8 +43,8 @@ you can obtain a device. .. code-block:: py - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync() The ``wgpu.gpu`` object is the API entrypoint (:class:`wgpu.GPU`). It contains just a handful of functions, including ``request_adapter()``. The device is used to create most other GPU objects. @@ -232,7 +232,7 @@ You can run your application via RenderDoc, which is able to capture a frame, including all API calls, objects and the complete pipeline state, and display all of that information within a nice UI. 
-You can use ``adapter.request_device()`` to provide a directory path +You can use ``adapter.request_device_sync()`` to provide a directory path where a trace of all API calls will be written. This trace can then be used to re-play your use-case elsewhere (it's cross-platform). diff --git a/docs/start.rst b/docs/start.rst index 6ccc490b..218a9ea3 100644 --- a/docs/start.rst +++ b/docs/start.rst @@ -99,7 +99,7 @@ You can verify whether the `"DiscreteGPU"` adapters are found: import wgpu import pprint - for a in wgpu.gpu.enumerate_adapters(): + for a in wgpu.gpu.enumerate_adapters_sync(): pprint.pprint(a.info) If you are using a remote frame buffer via `jupyter-rfb `_ we also recommend installing the following for optimal performance: diff --git a/examples/compute_noop.py b/examples/compute_noop.py index 8e9d08b5..9be2b906 100644 --- a/examples/compute_noop.py +++ b/examples/compute_noop.py @@ -62,7 +62,7 @@ device = wgpu.utils.get_default_device() # Show all available adapters -adapters = wgpu.gpu.enumerate_adapters() +adapters = wgpu.gpu.enumerate_adapters_sync() for a in adapters: print(a.summary) @@ -73,7 +73,7 @@ # adapter = a # break # assert adapter is not None -# device = adapter.request_device() +# device = adapter.request_device_sync() # %% cshader = device.create_shader_module(code=shader_source) diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py index b22564d9..60afddc9 100644 --- a/examples/compute_timestamps.py +++ b/examples/compute_timestamps.py @@ -41,10 +41,12 @@ for i in range(n): data2[i] = i * 2 -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") # Request a device with the timestamp_query feature, so we can profile our computation -device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query]) +device = adapter.request_device_sync( + required_features=[wgpu.FeatureName.timestamp_query] +) cshader = 
device.create_shader_module(code=shader_source) # Create buffer objects, input buffer is mapped. diff --git a/examples/cube.py b/examples/cube.py index c1b1a81c..09936fe9 100644 --- a/examples/cube.py +++ b/examples/cube.py @@ -12,7 +12,7 @@ print("Available adapters on this system:") -for a in wgpu.gpu.enumerate_adapters(): +for a in wgpu.gpu.enumerate_adapters_sync(): print(a.summary) @@ -22,8 +22,8 @@ canvas = WgpuCanvas(title="wgpu cube", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_backend_sea.py b/examples/imgui_backend_sea.py index f21b7996..77d6af87 100644 --- a/examples/imgui_backend_sea.py +++ b/examples/imgui_backend_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/imgui_basic_example.py b/examples/imgui_basic_example.py index 0012a942..50873dd7 100644 --- a/examples/imgui_basic_example.py +++ b/examples/imgui_basic_example.py @@ -15,8 +15,8 @@ canvas = WgpuCanvas(title="imgui", size=(640, 480)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() app_state = {"text": "Hello, World\nLorem ipsum, etc.\netc."} imgui_renderer = ImguiRenderer(device, canvas) diff --git 
a/examples/imgui_cmap_picker.py b/examples/imgui_cmap_picker.py index 2c71e58a..3c91d18f 100644 --- a/examples/imgui_cmap_picker.py +++ b/examples/imgui_cmap_picker.py @@ -26,8 +26,8 @@ canvas = WgpuCanvas(title="imgui", size=(512, 256)) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() imgui_renderer = ImguiRenderer(device, canvas) diff --git a/examples/imgui_multi_canvas.py b/examples/imgui_multi_canvas.py index 8495666c..37972cfe 100644 --- a/examples/imgui_multi_canvas.py +++ b/examples/imgui_multi_canvas.py @@ -17,8 +17,8 @@ canvases = [canvas1, canvas2, canvas3] # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") +device = adapter.request_device_sync() # create a imgui renderer for each canvas imgui_renderer1 = ImguiRenderer(device, canvas1) diff --git a/examples/imgui_renderer_sea.py b/examples/imgui_renderer_sea.py index 3fba3094..4ebba4a0 100644 --- a/examples/imgui_renderer_sea.py +++ b/examples/imgui_renderer_sea.py @@ -15,9 +15,9 @@ canvas = WgpuCanvas(title="imgui_sea", size=(800, 450), max_fps=60) # Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") -device = adapter.request_device() +device = adapter.request_device_sync() # Prepare present context present_context = canvas.get_context() diff --git a/examples/triangle.py b/examples/triangle.py index 43e82d89..11f43c62 100644 --- a/examples/triangle.py +++ b/examples/triangle.py @@ -62,8 +62,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = 
wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py index 146a525e..67b2638e 100644 --- a/examples/triangle_glsl.py +++ b/examples/triangle_glsl.py @@ -47,8 +47,8 @@ def main(canvas, power_preference="high-performance", limits=None): """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) + adapter = wgpu.gpu.request_adapter_sync(power_preference=power_preference) + device = adapter.request_device_sync(required_limits=limits) return _main(canvas, device) diff --git a/tests/test_api.py b/tests/test_api.py index 4ad0e391..a5f75cdb 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -111,7 +111,7 @@ def test_base_wgpu_api(): @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_backend_is_selected_automatically(): # Test this in a subprocess to have a clean wgpu with no backend imported yet - code = "import wgpu; print(wgpu.gpu.request_adapter())" + code = "import wgpu; print(wgpu.gpu.request_adapter_sync())" result = subprocess.run( [sys.executable, "-c", code], stdout=subprocess.PIPE, diff --git a/tests/test_gui_glfw.py b/tests/test_gui_glfw.py index 32a77edd..67d9be29 100644 --- a/tests/test_gui_glfw.py +++ b/tests/test_gui_glfw.py @@ -213,10 +213,10 @@ def get_context(self): canvas = CustomCanvas() # Also pass canvas here, to touch that code somewhere - adapter = wgpu.gpu.request_adapter( + adapter = wgpu.gpu.request_adapter_sync( canvas=canvas, power_preference="high-performance" ) - device = adapter.request_device() + device = adapter.request_device_sync() draw_frame = _get_draw_function(device, canvas) for i in 
range(5): diff --git a/tests/test_set_constant.py b/tests/test_set_constant.py index 1252feef..b12957a3 100644 --- a/tests/test_set_constant.py +++ b/tests/test_set_constant.py @@ -66,8 +66,8 @@ def setup_pipeline(): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync( required_features=["push-constants"], required_limits={"max-push-constant-size": 128}, ) diff --git a/tests/test_wgpu_native_basics.py b/tests/test_wgpu_native_basics.py index b4db2c87..21111d90 100644 --- a/tests/test_wgpu_native_basics.py +++ b/tests/test_wgpu_native_basics.py @@ -255,7 +255,7 @@ def test_compute_shader_wgsl(): assert isinstance(code, str) shader = device.create_shader_module(code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -268,7 +268,7 @@ def test_compute_shader_glsl(): assert isinstance(code, str) shader = device.create_shader_module(label="simple comp", code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -282,7 +282,7 @@ def test_compute_shader_spirv(): assert isinstance(code, bytes) shader = device.create_shader_module(code=code) - assert shader.get_compilation_info() == [] + assert shader.get_compilation_info_sync() == [] run_compute_shader(device, shader) @@ -328,7 +328,7 @@ def test_wgpu_native_tracer(): assert not os.path.isdir(tempdir) # Works! 
- wgpu.backends.wgpu_native.request_device(adapter, tempdir) + wgpu.backends.wgpu_native.request_device_sync(adapter, tempdir) assert os.path.isdir(tempdir) # Make dir not empty @@ -336,13 +336,13 @@ def test_wgpu_native_tracer(): pass # Still works, but produces warning - wgpu.backends.wgpu_native.request_device(adapter, tempdir) + wgpu.backends.wgpu_native.request_device_sync(adapter, tempdir) @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_enumerate_adapters(): # Get all available adapters - adapters = wgpu.gpu.enumerate_adapters() + adapters = wgpu.gpu.enumerate_adapters_sync() assert len(adapters) > 0 # Check adapter summaries @@ -353,13 +353,13 @@ def test_enumerate_adapters(): # Check that we can get a device from each adapter for adapter in adapters: - d = adapter.request_device() + d = adapter.request_device_sync() assert isinstance(d, wgpu.backends.wgpu_native.GPUDevice) @mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") def test_adapter_destroy(): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") assert adapter._internal is not None adapter.__del__() assert adapter._internal is None @@ -401,9 +401,9 @@ def are_features_wgpu_legal(features): """Returns true if the list of features is legal. Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_features=features) + adapter.request_device_sync(required_features=features) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) @@ -440,9 +440,9 @@ def are_limits_wgpu_legal(limits): """Returns true if the list of features is legal. 
Determining whether a specific set of features is implemented on a particular device would make the tests fragile, so we only verify that the names are legal feature names.""" - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") try: - adapter.request_device(required_limits=limits) + adapter.request_device_sync(required_limits=limits) return True except RuntimeError as e: assert "Unsupported features were requested" in str(e) diff --git a/tests/test_wgpu_native_buffer.py b/tests/test_wgpu_native_buffer.py index 31f9f5e3..22d25cf6 100644 --- a/tests/test_wgpu_native_buffer.py +++ b/tests/test_wgpu_native_buffer.py @@ -35,7 +35,7 @@ def test_buffer_init1(): ) # Download from buffer to CPU - buf.map(wgpu.MapMode.READ) + buf.map_sync(wgpu.MapMode.READ) wgpu.backends.wgpu_native._api.libf.wgpuDevicePoll( buf._device._internal, True, wgpu.backends.wgpu_native.ffi.NULL ) @@ -74,7 +74,7 @@ def test_buffer_init2(): buf.unmap() # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() print(data2.tobytes()) @@ -108,7 +108,7 @@ def test_buffer_init3(): buf = device.create_buffer(size=len(data1), usage="MAP_WRITE | COPY_SRC") # Write data to it - buf.map("write") + buf.map_sync("write") buf.write_mapped(data1) buf.unmap() @@ -124,7 +124,7 @@ def test_buffer_init3(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped() buf.unmap() assert data1 == data2 @@ -149,7 +149,7 @@ def test_consequitive_writes1(): # Write in parts for i in range(4): - buf.map("write") + buf.map_sync("write") buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -175,7 +175,7 @@ def test_consequitive_writes2(): ) # Write in parts - buf.map("write") + buf.map_sync("write") for i in range(4): buf.write_mapped(f"{i+1}".encode() * 8, i * 8) buf.unmap() @@ -205,13 
+205,13 @@ def test_consequitive_reads(): # Read in parts, the inefficient way for i in range(4): - buf.map("read") + buf.map_sync("read") data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 buf.unmap() # Read in parts, the efficient way - buf.map("read") + buf.map_sync("read") for i in range(4): data = buf.read_mapped(i * 8, 8) assert data == f"{i+1}".encode() * 8 @@ -234,15 +234,15 @@ def test_buffer_mapping_fails(): buf.read_mapped() # Not mapped with raises(ValueError): - buf.map("boo") # Invalid map mode + buf.map_sync("boo") # Invalid map mode - buf.map("write", 0, 28) + buf.map_sync("write", 0, 28) with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): buf.read_mapped() # Not mapped in read mode @@ -296,13 +296,13 @@ def test_buffer_mapping_fails(): with raises(RuntimeError): buf.write_mapped(data) # not mapped - buf.map("read", 8, 20) + buf.map_sync("read", 8, 20) with raises(RuntimeError): - buf.map("read") # Cannot map twice + buf.map_sync("read") # Cannot map twice with raises(RuntimeError): - buf.map("write") # Cannot map twice + buf.map_sync("write") # Cannot map twice with raises(RuntimeError): buf.write_mapped(data) # not mapped in write mode @@ -334,7 +334,7 @@ def test_buffer_read_no_copy(): device.queue.write_buffer(buf, 0, data1) # Download from buffer to CPU - buf.map("read") + buf.map_sync("read") data2 = buf.read_mapped(copy=False) data3 = buf.read_mapped(0, 8, copy=False) data4 = buf.read_mapped(8, 8, copy=False) @@ -502,7 +502,7 @@ def test_buffer_map_read_and_write(): # Upload data1 = b"abcdefghijkl" - buf1.map("write") + buf1.map_sync("write") buf1.write_mapped(data1) buf1.unmap() @@ -512,7 +512,7 @@ def test_buffer_map_read_and_write(): device.queue.submit([command_encoder.finish()]) # Download - buf2.map("read") + buf2.map_sync("read") 
data2 = buf2.read_mapped() buf2.unmap() assert data1 == data2 diff --git a/tests/test_wgpu_native_query_set.py b/tests/test_wgpu_native_query_set.py index 805ebba6..00ed8fd8 100644 --- a/tests/test_wgpu_native_query_set.py +++ b/tests/test_wgpu_native_query_set.py @@ -30,8 +30,8 @@ def test_query_set(): for i in range(n): data1[i] = float(i) - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + device = adapter.request_device_sync( required_features=[wgpu.FeatureName.timestamp_query] ) diff --git a/tests/test_wgpu_native_texture.py b/tests/test_wgpu_native_texture.py index 6bd300e0..0861fdb5 100644 --- a/tests/test_wgpu_native_texture.py +++ b/tests/test_wgpu_native_texture.py @@ -58,7 +58,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # mapped_data.cast("f")[:] = data1 # buf1.unmap() @@ -97,7 +97,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" # assert buf5.map_mode == 0 - # result_data = buf5.map(wgpu.MapMode.READ) # a memoryview + # result_data = buf5.map_sync(wgpu.MapMode.READ) # a memoryview # assert buf5.state == "mapped" # assert buf5.map_mode == wgpu.MapMode.READ # buf5.unmap() @@ -115,7 +115,7 @@ def test_do_a_copy_roundtrip(): # Upload from CPU to buffer # assert buf1.state == "unmapped" # assert buf1.map_mode == 0 - # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # mapped_data = buf1.map_sync(wgpu.MapMode.WRITE) # assert buf1.state == "mapped" # assert buf1.map_mode == wgpu.MapMode.WRITE # mapped_data.cast("f")[:] = data3 @@ -150,7 +150,7 @@ def test_do_a_copy_roundtrip(): # Download from buffer to CPU # assert buf5.state == "unmapped" - # result_data = buf5.map(wgpu.MapMode.READ) # always 
an uint8 array + # result_data = buf5.map_sync(wgpu.MapMode.READ) # always an uint8 array # assert buf5.state == "mapped" # buf5.unmap() # assert buf5.state == "unmapped" diff --git a/tests/test_wgpu_vertex_instance.py b/tests/test_wgpu_vertex_instance.py index ecda57dc..dfba3e42 100644 --- a/tests/test_wgpu_vertex_instance.py +++ b/tests/test_wgpu_vertex_instance.py @@ -72,16 +72,16 @@ class Runner: @classmethod def is_usable(cls): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") return set(cls.REQUIRED_FEATURES) <= adapter.features def __init__(self): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") features = [ *self.REQUIRED_FEATURES, *[x for x in self.OPTIONAL_FEATURES if x in adapter.features], ] - self.device = adapter.request_device(required_features=features) + self.device = adapter.request_device_sync(required_features=features) self.output_texture = self.device.create_texture( # Actual size is immaterial. Could just be 1x1 size=[128, 128], diff --git a/tests_mem/test_destroy.py b/tests_mem/test_destroy.py index 3736624b..3424cc1f 100644 --- a/tests_mem/test_destroy.py +++ b/tests_mem/test_destroy.py @@ -26,7 +26,7 @@ def test_destroy_device(n): adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() d.destroy() # NOTE: destroy is not yet implemented in wgpu-natice - this does not actually do anything yet yield d @@ -57,7 +57,7 @@ def test_destroy_buffer(n): # Uncomment the following lines to see. These are commented because it makes wgpu-core create a command-buffer. 
# try: - # b.map("READ") + # b.map_sync("READ") # except wgpu.GPUValidationError as err: # error = err # assert "destroyed" in error.message.lower() diff --git a/tests_mem/test_objects.py b/tests_mem/test_objects.py index 6aee8068..ce34a10a 100644 --- a/tests_mem/test_objects.py +++ b/tests_mem/test_objects.py @@ -20,7 +20,7 @@ def test_release_adapter(n): yield {} for i in range(n): - yield wgpu.gpu.request_adapter(power_preference="high-performance") + yield wgpu.gpu.request_adapter_sync(power_preference="high-performance") @create_and_release @@ -33,7 +33,7 @@ def test_release_device(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() yield d @@ -197,7 +197,7 @@ def test_release_queue(n): } adapter = DEVICE.adapter for i in range(n): - d = adapter.request_device() + d = adapter.request_device_sync() q = d.queue d._queue = None # detach yield q diff --git a/wgpu/__init__.py b/wgpu/__init__.py index 0c9ea7cd..646eef13 100644 --- a/wgpu/__init__.py +++ b/wgpu/__init__.py @@ -25,5 +25,5 @@ def request_adapter(*args, **kwargs): """Deprecated!""" raise DeprecationWarning( - "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter() instead." + "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter_sync() instead." ) diff --git a/wgpu/_classes.py b/wgpu/_classes.py index fa7dde84..ad020a8e 100644 --- a/wgpu/_classes.py +++ b/wgpu/_classes.py @@ -681,7 +681,7 @@ class GPUDevice(GPUObjectBase): from it: when the device is lost, all objects created from it become invalid. - Create a device using `GPUAdapter.request_device()` or + Create a device using `GPUAdapter.request_device_sync()` or `GPUAdapter.request_device_async()`. """ @@ -2221,7 +2221,7 @@ def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): Alignment: the buffer offset must be a multiple of 4, the total size to write must be a multiple of 4 bytes. - Also see `GPUBuffer.map()`. 
+ Also see `GPUBuffer.map_sync()` and `GPUBuffer.map_async()`. """ raise NotImplementedError() @@ -2239,7 +2239,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): and then maps that buffer to read the data. The given buffer's usage must include COPY_SRC. - Also see `GPUBuffer.map()`. + Also see `GPUBuffer.map_sync()` and `GPUBuffer.map_async()`. """ raise NotImplementedError() @@ -2546,4 +2546,4 @@ def proxy_method(self, *args, **kwargs): _seed_object_counts() _set_repr_methods() -_set_compat_methods_for_async_methods() +# _set_compat_methods_for_async_methods() diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py index d19d6c24..d8842abf 100644 --- a/wgpu/backends/js_webgpu/__init__.py +++ b/wgpu/backends/js_webgpu/__init__.py @@ -12,7 +12,7 @@ class GPU: - def request_adapter(self, **parameters): + def request_adapter_sync(self, **parameters): raise NotImplementedError("Cannot use sync API functions in JS.") async def request_adapter_async(self, **parameters): diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py index a2e4a187..cfe3f2aa 100644 --- a/wgpu/backends/rs.py +++ b/wgpu/backends/rs.py @@ -6,7 +6,7 @@ WARNING: wgpu.backends.rs is deprecated. Instead you can use: - import wgpu.backends.wgpu_native to use the backend by its new name. - import wgpu.backends.auto to do the same, but simpler and more future proof. -- simply use wgpu.gpu.request_adapter() to auto-load the backend. +- simply use wgpu.gpu.request_adapter_sync() to auto-load the backend. 
""".strip() print(_deprecation_msg) diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py index c12d4a2d..f372fd61 100644 --- a/wgpu/backends/wgpu_native/_api.py +++ b/wgpu/backends/wgpu_native/_api.py @@ -2224,7 +2224,7 @@ def get_compilation_info_sync(self): return [] async def get_compilation_info_async(self): - raise NotImplementedError() + return self.get_compilation_info_sync() class GPUPipelineBase(classes.GPUPipelineBase): @@ -3098,7 +3098,7 @@ def read_buffer(self, buffer, buffer_offset=0, size=None): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer.map_sync("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. @@ -3202,7 +3202,7 @@ def read_texture(self, source, data_layout, size): self.submit([command_buffer]) # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") + tmp_buffer.map_sync("READ_NOSYNC") data = tmp_buffer.read_mapped() # Explicit drop. diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py index e04196c9..e2fe8005 100644 --- a/wgpu/backends/wgpu_native/extras.py +++ b/wgpu/backends/wgpu_native/extras.py @@ -14,7 +14,7 @@ def enumerate_adapters(): raise RuntimeError("Deprecated: use wgpu.gpu.enumerate_adapters() instead.") -def request_device( +def request_device_sync( adapter, trace_path, *, @@ -35,6 +35,14 @@ def request_device( ) +# Backwards compat for deprecated function +def request_device(*args, **kwargs): + logger.warning( + "WGPU: wgpu.backends.wgpu_native.request_device() is deprecated, use request_device_sync() instead." 
+ ) + return request_device_sync(*args, **kwargs) + + def create_pipeline_layout( device, *, diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py index 1a42076e..c50dbbae 100644 --- a/wgpu/utils/device.py +++ b/wgpu/utils/device.py @@ -12,6 +12,6 @@ def get_default_device(): if _default_device is None: import wgpu.backends.auto # noqa - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - _default_device = adapter.request_device() + adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance") + _default_device = adapter.request_device_sync() return _default_device