Merge branch 'main' into compilationinfo
almarklein authored Oct 24, 2023
2 parents e4f2205 + ce68b40 commit 0d654c1
Showing 7 changed files with 90 additions and 42 deletions.
7 changes: 5 additions & 2 deletions docs/wgpu.rst
@@ -28,8 +28,7 @@ Most methods in this API have no positional arguments; each argument
must be referenced by name. Some argument values must be a :doc:`dict <wgpu_structs>`, these
can be thought of as "nested" arguments. Many arguments (and dict fields) must be a
:doc:`flag <wgpu_flags>` or :doc:`enum <wgpu_enums>`.
Flags are integer bitmasks that can be *orred* together. Enum values are
strings in this API. Some arguments have a default value. Most do not.
Some arguments have a default value. Most do not.


Differences from WebGPU
@@ -166,6 +165,10 @@ These classes are not supported and/or documented yet.
List of flags, enums, and structs
---------------------------------

Enum values are strings, so instead of ``wgpu.TextureFormat.rgba8unorm`` one can also use "rgba8unorm".
Flags are integer bitmasks, but can also be passed as strings, so instead of
``wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST``, one can also use "MAP_READ|COPY_DST".
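
For illustration, a minimal sketch (assuming a ``device`` has already been
requested) in which both spellings create the same buffer::

    # Using the flag objects
    buf = device.create_buffer(
        size=64, usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST
    )
    # Using the equivalent string form
    buf = device.create_buffer(size=64, usage="MAP_READ|COPY_DST")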

.. toctree::
:maxdepth: 2

17 changes: 4 additions & 13 deletions tests/test_rs_buffer.py
@@ -90,9 +90,7 @@ def test_buffer_init3():
# Option 1: write via queue (i.e. temp buffer), read via queue

# Create buffer
buf = device.create_buffer(
size=len(data1), usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC
)
buf = device.create_buffer(size=len(data1), usage="COPY_DST|COPY_SRC")

# Write data to it
device.queue.write_buffer(buf, 0, data1)
@@ -104,9 +102,7 @@ def test_buffer_init3():
# Option 2: Write via mapped data, read via queue

# Create buffer
buf = device.create_buffer(
size=len(data1), usage=wgpu.BufferUsage.MAP_WRITE | wgpu.BufferUsage.COPY_SRC
)
buf = device.create_buffer(size=len(data1), usage="MAP_WRITE | COPY_SRC")

# Write data to it
buf.map("write")
@@ -119,9 +115,7 @@ def test_buffer_init3():

# Option 3: Write via queue, read via mapped data

buf = device.create_buffer(
size=len(data1), usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST
)
buf = device.create_buffer(size=len(data1), usage=" MAP_READ | COPY_DST ")

# Write data to it
device.queue.write_buffer(buf, 0, data1)
@@ -136,10 +130,7 @@ def test_buffer_init3():

# Not actually an option
with raises(wgpu.GPUValidationError):
buf = device.create_buffer(
size=len(data1),
usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.MAP_WRITE,
)
buf = device.create_buffer(size=len(data1), usage="MAP_READ |MAP_WRITE")


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
20 changes: 19 additions & 1 deletion tests/test_util_core.py
@@ -1,4 +1,5 @@
from wgpu._coreutils import error_message_hash
import wgpu
from wgpu._coreutils import error_message_hash, str_flag_to_int, _flag_cache
from testutils import run_tests


@@ -25,5 +26,22 @@ def test_error_message_hash():
assert error_message_hash(text1) != error_message_hash(text3)


def test_str_flag_to_int():
versions = [
"UNIFORM|VERTEX",
"UNIFORM | VERTEX",
"VERTEX | UNIFORM",
"VERTEX| UNIFORM",
]

flags = [str_flag_to_int(wgpu.BufferUsage, v) for v in versions]

for flag in flags:
assert flag == flags[0]

for v in versions:
assert f"BufferUsage.{v}" in _flag_cache


if __name__ == "__main__":
run_tests(globals())
31 changes: 31 additions & 0 deletions wgpu/_coreutils.py
@@ -40,6 +40,37 @@ def error_message_hash(message):
return hash(message)


_flag_cache = {} # str -> int


def str_flag_to_int(flag, s):
"""Allow using strings for flags, i.e. 'READ' instead of wgpu.MapMode.READ.
No worries about repeated overhead, because the resuls are cached.
"""
cache_key = (
f"{flag._name}.{s}"  # using private attribute, let's call this a friend func
)
value = _flag_cache.get(cache_key, None)

if value is None:
parts = [p.strip() for p in s.split("|")]
parts = [p for p in parts if p]
invalid_parts = [p for p in parts if p.startswith("_")]
if not parts or invalid_parts:
raise ValueError(f"Invalid flag value: {s}")

value = 0
for p in parts:
try:
v = flag.__dict__[p.upper()]
value |= v  # or the bits together, so a repeated name cannot double-count
except KeyError:
raise ValueError(f"Invalid flag value for {flag}: '{p}'")
_flag_cache[cache_key] = value

return value

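# A minimal usage sketch (illustrative; the import lines below are assumptions
# for the example, not needed inside this module):
#
#     from wgpu import flags
#     from wgpu._coreutils import str_flag_to_int
#
#     combined = str_flag_to_int(flags.BufferUsage, "MAP_READ | COPY_DST")
#     assert combined == flags.BufferUsage.MAP_READ | flags.BufferUsage.COPY_DST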

class ApiDiff:
"""Helper class to define differences in the API by annotating
methods. This way, these differences are made explicit, plus they're
51 changes: 27 additions & 24 deletions wgpu/backends/rs.py
@@ -24,7 +24,7 @@

from .. import base, flags, enums, structs
from .. import _register_backend
from .._coreutils import ApiDiff
from .._coreutils import ApiDiff, str_flag_to_int

from .rs_ffi import ffi, lib, check_expected_version
from .rs_mappings import cstructfield2enum, enummap, enum_str2int, enum_int2str
@@ -431,7 +431,7 @@ def print_storage_report(topic, d):
class GPUCanvasContext(base.GPUCanvasContext):
def __init__(self, canvas):
super().__init__(canvas)
self._surface_size = (-1, -1, -1)
self._surface_size = (-1, -1)
self._surface_id = None
self._internal = None
self._current_texture = None
@@ -472,11 +472,10 @@ def present(self):
def _create_native_swap_chain_if_needed(self):
canvas = self._get_canvas()
psize = canvas.get_physical_size()
ref_size = psize[0], psize[1], canvas.get_pixel_ratio()

if ref_size == self._surface_size:
if psize == self._surface_size:
return
self._surface_size = ref_size
self._surface_size = psize

if self._surface_id is None:
self._surface_id = get_surface_id_from_canvas(canvas)
@@ -829,12 +828,14 @@ def create_buffer(

def _create_buffer(self, label, size, usage, mapped_at_creation):
# Create a buffer object
if isinstance(usage, str):
usage = str_flag_to_int(flags.BufferUsage, usage)
# H: nextInChain: WGPUChainedStruct *, label: char *, usage: WGPUBufferUsageFlags/int, size: int, mappedAtCreation: bool
struct = new_struct_p(
"WGPUBufferDescriptor *",
label=to_c_label(label),
size=size,
usage=usage,
usage=int(usage),
mappedAtCreation=mapped_at_creation,
# not used: nextInChain
)
@@ -862,6 +863,9 @@ def create_texture(
usage: "flags.TextureUsage",
view_formats: "List[enums.TextureFormat]" = [],
):
if isinstance(usage, str):
usage = str_flag_to_int(flags.TextureUsage, usage)
usage = int(usage)
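# For illustration (hypothetical values), the string form allows e.g.
#     device.create_texture(
#         size=(16, 16, 1),
#         format=wgpu.TextureFormat.rgba8unorm,
#         usage="COPY_DST|TEXTURE_BINDING",
#     )
# which resolves to TextureUsage.COPY_DST | TextureUsage.TEXTURE_BINDING.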
size = _tuple_from_tuple_or_dict(
size, ("width", "height", "depth_or_array_layers")
)
@@ -1007,11 +1011,14 @@ def create_bind_group_layout(
)
# Unreachable - fool the codegen
check_struct("ExternalTextureBindingLayout", info)
visibility = entry["visibility"]
if isinstance(visibility, str):
visibility = str_flag_to_int(flags.ShaderStage, visibility)
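# For illustration (hypothetical entry), visibility may now be a string, e.g.
#     {"binding": 0, "visibility": "VERTEX|FRAGMENT", "buffer": {}}
# instead of flags.ShaderStage.VERTEX | flags.ShaderStage.FRAGMENT.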
# H: nextInChain: WGPUChainedStruct *, binding: int, visibility: WGPUShaderStageFlags/int, buffer: WGPUBufferBindingLayout, sampler: WGPUSamplerBindingLayout, texture: WGPUTextureBindingLayout, storageTexture: WGPUStorageTextureBindingLayout
c_entry = new_struct(
"WGPUBindGroupLayoutEntry",
binding=int(entry["binding"]),
visibility=int(entry["visibility"]),
visibility=int(visibility),
buffer=buffer,
sampler=sampler,
texture=texture,
@@ -1556,23 +1563,11 @@ def map(self, mode, offset=0, size=None):

# Check mode
if isinstance(mode, str):
if mode.upper() == "READ_NOSYNC": # for internal use
mode = flags.MapMode.READ
if mode == "READ_NOSYNC": # for internal use
sync_on_read = False
elif mode.upper() == "READ":
mode = flags.MapMode.READ
elif mode.upper() == "WRITE":
mode = flags.MapMode.WRITE
else:
raise ValueError("Map mode must be READ or WRITE")
if isinstance(mode, int):
map_mode = 0
if mode & flags.MapMode.READ:
map_mode |= lib.WGPUMapMode_Read
if mode & flags.MapMode.WRITE:
map_mode |= lib.WGPUMapMode_Write
else: # pragma: no cover
raise TypeError("Map mode should be flag (int) or str.")
mode = "READ"
mode = str_flag_to_int(flags.MapMode, mode)
map_mode = int(mode)

# Check offset and size
offset, size = self._check_range(offset, size)
@@ -2599,11 +2594,19 @@ def write_texture(self, destination, data, data_layout, size):
raise ValueError("copy destination texture must be a texture, not a view")

m, address = get_memoryview_and_address(data)
# todo: could we not derive the size from the shape of m?

c_data = ffi.cast("uint8_t *", address)
data_length = m.nbytes

# We could allow size=None in this method, and derive the size from the data.
# Or compare size with the data size if it is given. However, the data
# could be a bit raw, being 1D and/or the shape expressed in bytes, so
# this gets a bit muddy. Also methods like copy_buffer_to_texture have the
# same size arg, so let's just leave it like this.
#
# data_size = list(reversed(m.shape)) + [1, 1, 1]
# data_size = data_size[:3]

size = _tuple_from_tuple_or_dict(
size, ("width", "height", "depth_or_array_layers")
)
3 changes: 2 additions & 1 deletion wgpu/base.py
@@ -988,7 +988,8 @@ def map(self, mode, offset=0, size=None):
to ``unmap()`` when done.
Arguments:
mode (enum): The mapping mode, either mapmode.READ or mapmode.WRITE.
mode (enum): The mapping mode, either wgpu.MapMode.READ or
wgpu.MapMode.WRITE; can also be given as a string.
offset (int): the buffer offset in bytes. Default 0.
size (int): the size to read. Default until the end.
"""
3 changes: 2 additions & 1 deletion wgpu/gui/_offscreen.py
@@ -66,7 +66,8 @@ def get_preferred_format(self, adapter):

def get_current_texture(self):
self._create_new_texture_if_needed()
# todo: we return a view here, to align with the rs implementation, even though its wrong.
# Technically a texture view, even though WebGPU says it must be a texture.
# This API will change in a future version anyway.
return self._texture_view

def present(self):