diff --git a/framework/CMakeLists.txt b/framework/CMakeLists.txt index 11354050db..31af1c83d3 100644 --- a/framework/CMakeLists.txt +++ b/framework/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2023, Arm Limited and Contributors +# Copyright (c) 2019-2024, Arm Limited and Contributors # # SPDX-License-Identifier: Apache-2.0 # @@ -78,7 +78,7 @@ set(COMMON_FILES common/ktx_common.h common/vk_common.h common/vk_initializers.h - common/glm_common.h + common/glm_common.h common/resource_caching.h common/logging.h common/helpers.h @@ -242,6 +242,7 @@ set(CORE_FILES core/command_pool.h core/swapchain.h core/command_buffer.h + core/allocated.h core/buffer.h core/image.h core/image_view.h @@ -254,6 +255,7 @@ set(CORE_FILES core/scratch_buffer.h core/acceleration_structure.h core/shader_binding_table.h + core/hpp_allocated.h core/hpp_buffer.h core/hpp_command_buffer.h core/hpp_command_pool.h @@ -293,6 +295,7 @@ set(CORE_FILES core/command_pool.cpp core/swapchain.cpp core/command_buffer.cpp + core/allocated.cpp core/buffer.cpp core/image.cpp core/image_view.cpp @@ -387,7 +390,7 @@ set(LINUX_D2D_FILES platform/unix/direct_window.h # Source Files platform/unix/unix_d2d_platform.cpp - platform/unix/direct_window.cpp) + platform/unix/direct_window.cpp) source_group("\\" FILES ${FRAMEWORK_FILES}) source_group("common\\" FILES ${COMMON_FILES}) diff --git a/framework/api_vulkan_sample.cpp b/framework/api_vulkan_sample.cpp index 8c5a0ee30d..c44552299c 100644 --- a/framework/api_vulkan_sample.cpp +++ b/framework/api_vulkan_sample.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -1000,12 +1000,7 @@ Texture ApiVulkanSample::load_texture(const std::string &file, vkb::sg::Image::C VkCommandBuffer command_buffer = device->create_command_buffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); - vkb::core::Buffer stage_buffer{*device, - texture.image->get_data().size(), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::Buffer stage_buffer = vkb::core::Buffer::create_staging_buffer(*device, texture.image->get_data()); // Setup buffer copy regions for each mip level std::vector bufferCopyRegions; @@ -1096,12 +1091,7 @@ Texture ApiVulkanSample::load_texture_array(const std::string &file, vkb::sg::Im VkCommandBuffer command_buffer = device->create_command_buffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); - vkb::core::Buffer stage_buffer{*device, - texture.image->get_data().size(), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::Buffer stage_buffer = vkb::core::Buffer::create_staging_buffer(*device, texture.image->get_data()); // Setup buffer copy regions for each mip level std::vector buffer_copy_regions; @@ -1195,12 +1185,7 @@ Texture ApiVulkanSample::load_texture_cubemap(const std::string &file, vkb::sg:: VkCommandBuffer command_buffer = device->create_command_buffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true); - vkb::core::Buffer stage_buffer{*device, - texture.image->get_data().size(), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::Buffer stage_buffer = vkb::core::Buffer::create_staging_buffer(*device, texture.image->get_data()); // Setup buffer copy regions for each mip level std::vector buffer_copy_regions; diff --git a/framework/buffer_pool.h b/framework/buffer_pool.h index 
ab0cd2ee20..44e5ef3be2 100644 --- a/framework/buffer_pool.h +++ b/framework/buffer_pool.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -127,7 +127,7 @@ class BufferBlock class BufferPool { public: - BufferPool(Device &device, VkDeviceSize block_size, VkBufferUsageFlags usage, VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU); + BufferPool(Device &device, VkDeviceSize block_size, VkBufferUsageFlags usage, VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_AUTO); BufferBlock &request_buffer_block(VkDeviceSize minimum_size, bool minimal = false); diff --git a/framework/core/allocated.cpp b/framework/core/allocated.cpp new file mode 100644 index 0000000000..545722d589 --- /dev/null +++ b/framework/core/allocated.cpp @@ -0,0 +1,90 @@ +/* Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2024, Bradley Austin Davis. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 the "License"; + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "allocated.h" + +namespace vkb +{ + +namespace core +{ + +[[nodiscard]] std::pair<VkBuffer, VmaAllocation> vma_create_buffer(VmaAllocator allocator, const VkBufferCreateInfo &create_info, VmaAllocationCreateInfo &alloc_create_info, VmaAllocationInfo *alloc_info) +{ + VkBuffer handle = VK_NULL_HANDLE; + VmaAllocation allocation = VK_NULL_HANDLE; + + // The existing code produces some weird combinations, like in gui.cpp where you see this...
+ + if (alloc_create_info.usage < VMA_MEMORY_USAGE_AUTO) + { + alloc_create_info.usage = VMA_MEMORY_USAGE_AUTO; + } + + auto result = vmaCreateBuffer( + allocator, + &create_info, + &alloc_create_info, + &handle, + &allocation, + alloc_info); + + if (result != VK_SUCCESS) + { + throw VulkanException{result, "Cannot create Buffer"}; + } + return {handle, allocation}; +} + +[[nodiscard]] std::pair<VkImage, VmaAllocation> vma_create_image(VmaAllocator allocator, const VkImageCreateInfo &create_info, VmaAllocationCreateInfo &alloc_create_info, VmaAllocationInfo *alloc_info) +{ + VkImage handle = VK_NULL_HANDLE; + VmaAllocation allocation = VK_NULL_HANDLE; + +#if 0 + // If the image is an attachment, prefer dedicated memory + constexpr VkImageUsageFlags attachment_only_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; + if (create_info.usage & attachment_only_flags) + { + alloc_create_info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if (create_info.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) + { + alloc_create_info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + } +#endif + + auto result = vmaCreateImage( + allocator, + &create_info, + &alloc_create_info, + &handle, + &allocation, + alloc_info); + + if (result != VK_SUCCESS) + { + throw VulkanException{result, "Cannot create Image"}; + } + + return {handle, allocation}; +} + +} // namespace core +} // namespace vkb diff --git a/framework/core/allocated.h b/framework/core/allocated.h new file mode 100644 index 0000000000..8da63238c9 --- /dev/null +++ b/framework/core/allocated.h @@ -0,0 +1,379 @@ +/* Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2024, Bradley Austin Davis. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 the "License"; + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#pragma once + +#include "common/error.h" +#include "core/vulkan_resource.h" +#include +#include + +namespace vkb +{ + +class Device; + +namespace core +{ + +template +struct AllocatedBuilder +{ + VmaAllocationCreateInfo alloc_create_info{}; + CreateInfoType create_info; + std::string debug_name; + + protected: + AllocatedBuilder(const AllocatedBuilder &other) = delete; + AllocatedBuilder(const CreateInfoType &create_info) : + create_info(create_info) + { + alloc_create_info.usage = VMA_MEMORY_USAGE_AUTO; + }; + + public: + BuilderType &with_debug_name(const std::string &name) + { + debug_name = name; + return *static_cast(this); + } + + BuilderType &with_vma_usage(VmaMemoryUsage usage) + { + alloc_create_info.usage = usage; + return *static_cast(this); + } + + BuilderType &with_vma_flags(VmaAllocationCreateFlags flags) + { + alloc_create_info.flags = flags; + return *static_cast(this); + } + + BuilderType &with_vma_required_flags(VkMemoryPropertyFlags flags) + { + alloc_create_info.requiredFlags = flags; + return *static_cast(this); + } + + BuilderType &with_vma_preferred_flags(VkMemoryPropertyFlags flags) + { + alloc_create_info.preferredFlags = flags; + return *static_cast(this); + } + + BuilderType &with_memory_type_bits(uint32_t type_bits) + { + alloc_create_info.memoryTypeBits = type_bits; + return *static_cast(this); + } + + BuilderType &with_vma_pool(VmaPool pool) + { + alloc_create_info.pool = pool; + return *static_cast(this); + } + + BuilderType &with_queue_families(uint32_t count, const uint32_t *family_indices) + { + create_info.queueFamilyIndexCount = count; + create_info.pQueueFamilyIndices = family_indices; + return *static_cast(this); + } + + BuilderType &with_sharing(VkSharingMode sharing) + { + create_info.sharingMode = sharing; + return *static_cast(this); + } + + BuilderType &with_implicit_sharing_mode() + { + if (create_info.queueFamilyIndexCount != 0) + { + create_info.sharingMode = static_cast(VK_SHARING_MODE_CONCURRENT); + } + else + { + create_info.sharingMode = static_cast(VK_SHARING_MODE_EXCLUSIVE); + } + return *static_cast(this); + } + + BuilderType &with_queue_families(const std::vector &queue_families) + { + return with_queue_families(static_cast(queue_families.size()), queue_families.data()); + } +}; + +[[nodiscard]] std::pair vma_create_buffer(VmaAllocator alloctor, const VkBufferCreateInfo &create_info, VmaAllocationCreateInfo &alloc_create_info, VmaAllocationInfo *alloc_info = nullptr); + +[[nodiscard]] std::pair vma_create_image(VmaAllocator alloctor, const VkImageCreateInfo &create_info, VmaAllocationCreateInfo &alloc_create_info, VmaAllocationInfo *alloc_info = nullptr); + +template < + typename HandleType, + typename MemoryType = VkDeviceMemory, + typename DeviceType = const vkb::Device, + typename ParentType = VulkanResource> +class Allocated : public ParentType +{ + public: + using ParentType::ParentType; + + // Import the base class constructors + template + Allocated(const VmaAllocationCreateInfo &alloc_create_info, Args &&...args) : + ParentType(std::forward(args)...), + alloc_create_info(alloc_create_info) + { + persistent = (alloc_create_info.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + } + + Allocated(const Allocated &) = delete; + Allocated(Allocated &&other) : + ParentType{static_cast(other)}, + alloc_create_info(std::exchange(other.alloc_create_info, {})), + allocation(std::exchange(other.allocation, {})), + memory(std::exchange(other.memory, {})), + mapped_data(std::exchange(other.mapped_data, {})), + 
mapped(std::exchange(other.mapped, {})) + { + assert(other.allocation == VK_NULL_HANDLE); + assert(other.memory == MemoryType{VK_NULL_HANDLE}); + assert(other.mapped_data == nullptr); + assert(other.mapped == false); + } + + Allocated &operator=(const Allocated &) = delete; + Allocated &operator=(Allocated &&) = delete; + + const HandleType *get() const + { + return &ParentType::get_handle(); + } + + VmaAllocation get_allocation() const + { + return allocation; + } + + const uint8_t *get_data() const + { + return mapped_data; + } + + MemoryType get_memory() const + { + return memory; + } + + /** + * @brief Flushes memory if it is HOST_VISIBLE and not HOST_COHERENT + */ + void flush(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE) + { + if (!coherent) + { + vmaFlushAllocation(get_memory_allocator(), allocation, offset, size); + } + } + + /** + * @brief Maps vulkan memory if it isn't already mapped to an host visible address + * @return Pointer to host visible memory + */ + uint8_t *map() + { + if (!persistent && !mapped && !mapped_data) + { + VK_CHECK(vmaMapMemory(get_memory_allocator(), allocation, reinterpret_cast(&mapped_data))); + mapped = true; + } + return mapped_data; + } + + /** + * @brief Unmaps vulkan memory from the host visible address + */ + void unmap() + { + if (!persistent && mapped) + { + vmaUnmapMemory(get_memory_allocator(), allocation); + mapped_data = nullptr; + mapped = false; + } + } + + /** + * @brief Copies byte data into the buffer + * @param data The data to copy from + * @param size The amount of bytes to copy + * @param offset The offset to start the copying into the mapped data + */ + size_t update(const uint8_t *data, size_t size, size_t offset = 0) + { + if (persistent) + { + std::copy(data, data + size, mapped_data + offset); + flush(); + } + else + { + map(); + std::copy(data, data + size, mapped_data + offset); + flush(); + unmap(); + } + return size; + } + + /** + * @brief Converts any non byte data into bytes and then updates the buffer + * @param data The data to copy from + * @param size The amount of bytes to copy + * @param offset The offset to start the copying into the mapped data + */ + size_t update(void const *data, size_t size, size_t offset = 0) + { + return update(reinterpret_cast(data), size, offset); + } + + /** + * @brief Copies a vector of bytes into the buffer + * @param data The data vector to upload + * @param offset The offset to start the copying into the mapped data + */ + size_t update(const std::vector &data, size_t offset = 0) + { + return update(data.data(), data.size(), offset); + } + + template + size_t update(std::vector const &data, size_t offset = 0) + { + return update(data.data(), data.size() * sizeof(T), offset); + } + + template + size_t update(std::array const &data, size_t offset = 0) + { + return update(data.data(), data.size() * sizeof(T), offset); + } + + /** + * @brief Copies an object as byte data into the buffer + * @param object The object to convert into byte data + * @param offset The offset to start the copying into the mapped data + */ + template + size_t convert_and_update(const T &object, size_t offset = 0) + { + return update(reinterpret_cast(&object), sizeof(T), offset); + } + + protected: + void post_create(VmaAllocationInfo const &allocation_info) + { + if (alloc_create_info.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) + { + persistent = true; + } + + VkMemoryPropertyFlags memory_properties; + vmaGetAllocationMemoryProperties(get_memory_allocator(), allocation, &memory_properties); + if 
(memory_properties & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + { + coherent = true; + } + + memory = allocation_info.deviceMemory; + if (persistent) + { + mapped_data = static_cast(allocation_info.pMappedData); + assert(mapped_data); + } + } + + [[nodiscard]] VkBuffer create_buffer(VkBufferCreateInfo const &create_info) + { + VkBuffer handleResult = VK_NULL_HANDLE; + VmaAllocationInfo allocation_info{}; + std::tie(handleResult, allocation) = vma_create_buffer(get_memory_allocator(), create_info, alloc_create_info, &allocation_info); + post_create(allocation_info); + return handleResult; + } + + [[nodiscard]] VkImage create_image(VkImageCreateInfo const &create_info) + { + assert(0 < create_info.mipLevels && "Images should have at least one level"); + assert(0 < create_info.arrayLayers && "Images should have at least one layer"); + assert(0 < create_info.usage && "Images should have at least one usage type"); + + VkImage handleResult = VK_NULL_HANDLE; + VmaAllocationInfo allocation_info{}; + std::tie(handleResult, allocation) = vma_create_image(get_memory_allocator(), create_info, alloc_create_info, &allocation_info); + post_create(allocation_info); + return handleResult; + } + + void destroy_buffer() + { + if (ParentType::get_handle() != HandleType{VK_NULL_HANDLE} && memory != MemoryType{VK_NULL_HANDLE}) + { + unmap(); + vmaDestroyBuffer(get_memory_allocator(), ParentType::get_handle(), allocation); + clear(); + } + } + + void destroy_image() + { + if (ParentType::get_handle() != VK_NULL_HANDLE && memory != MemoryType{VK_NULL_HANDLE}) + { + unmap(); + vmaDestroyImage(get_memory_allocator(), ParentType::get_handle(), allocation); + clear(); + } + } + + void clear() + { + memory = nullptr; + mapped_data = nullptr; + persistent = false; + alloc_create_info = {}; + } + + private: + VmaAllocator get_memory_allocator() const + { + return ParentType::get_device().get_memory_allocator(); + } + + protected: + VmaAllocationCreateInfo alloc_create_info{}; + VmaAllocation allocation = VK_NULL_HANDLE; + MemoryType memory = nullptr; + uint8_t *mapped_data = nullptr; + bool coherent = false; + bool persistent = false; // Whether the buffer is persistently mapped or not + bool mapped = false; // Whether the buffer has been mapped with vmaMapMemory +}; +} // namespace core +} // namespace vkb diff --git a/framework/core/buffer.cpp b/framework/core/buffer.cpp index defe63aed7..7b5bf4b91d 100644 --- a/framework/core/buffer.cpp +++ b/framework/core/buffer.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -23,88 +23,49 @@ namespace vkb { namespace core { -Buffer::Buffer(Device const &device, VkDeviceSize size, VkBufferUsageFlags buffer_usage, VmaMemoryUsage memory_usage, VmaAllocationCreateFlags flags, const std::vector &queue_family_indices) : - VulkanResource{VK_NULL_HANDLE, &device}, - size{size} -{ -#ifdef VK_USE_PLATFORM_METAL_EXT - // Workaround for Mac (MoltenVK requires unmapping https://github.com/KhronosGroup/MoltenVK/issues/175) - // Force cleares the flag VMA_ALLOCATION_CREATE_MAPPED_BIT - flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; -#endif - - persistent = (flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - - VkBufferCreateInfo buffer_info{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO}; - buffer_info.usage = buffer_usage; - buffer_info.size = size; - if (queue_family_indices.size() >= 2) - { - buffer_info.sharingMode = VK_SHARING_MODE_CONCURRENT; - 
buffer_info.queueFamilyIndexCount = static_cast<uint32_t>(queue_family_indices.size()); - buffer_info.pQueueFamilyIndices = queue_family_indices.data(); - } - - VmaAllocationCreateInfo memory_info{}; - memory_info.flags = flags; - memory_info.usage = memory_usage; - VmaAllocationInfo allocation_info{}; - auto result = vmaCreateBuffer(device.get_memory_allocator(), - &buffer_info, &memory_info, - &handle, &allocation, - &allocation_info); - - if (result != VK_SUCCESS) - { - throw VulkanException{result, "Cannot create Buffer"}; - } - - memory = allocation_info.deviceMemory; - - if (persistent) +Buffer Buffer::create_staging_buffer(Device const &device, VkDeviceSize size, const void *data) +{ + BufferBuilder builder{size}; + builder.with_vma_flags(VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT); + builder.with_usage(VK_BUFFER_USAGE_TRANSFER_SRC_BIT); + Buffer result(device, builder); + if (data != nullptr) { - mapped_data = static_cast<uint8_t *>(allocation_info.pMappedData); + result.update(data, size); } + return result; } -Buffer::Buffer(Buffer &&other) : - VulkanResource{other.handle, other.device}, - allocation{other.allocation}, - memory{other.memory}, - size{other.size}, - mapped_data{other.mapped_data}, - mapped{other.mapped} -{ - // Reset other handles to avoid releasing on destruction - other.allocation = VK_NULL_HANDLE; - other.memory = VK_NULL_HANDLE; - other.mapped_data = nullptr; - other.mapped = false; -} - -Buffer::~Buffer() -{ - if (handle != VK_NULL_HANDLE && allocation != VK_NULL_HANDLE) +Buffer::Buffer(Device const &device, VkDeviceSize size, VkBufferUsageFlags buffer_usage, VmaMemoryUsage memory_usage, VmaAllocationCreateFlags flags, const std::vector<uint32_t> &queue_family_indices) : + Buffer(device, + BufferBuilder(size) + .with_usage(buffer_usage) + .with_vma_usage(memory_usage) + .with_vma_flags(flags) + .with_queue_families(queue_family_indices) + .with_implicit_sharing_mode()) +{} + +Buffer::Buffer(Device const &device, const BufferBuilder &builder) : + Allocated{builder.alloc_create_info, VK_NULL_HANDLE, &device}, size(builder.create_info.size) +{ + handle = create_buffer(builder.create_info); + if (!builder.debug_name.empty()) { - unmap(); - vmaDestroyBuffer(device->get_memory_allocator(), handle, allocation); + set_debug_name(builder.debug_name); } } -const VkBuffer *Buffer::get() const +Buffer::Buffer(Buffer &&other) noexcept : + Allocated{std::move(other)}, + size{std::exchange(other.size, {})} { - return &handle; } -VmaAllocation Buffer::get_allocation() const -{ - return allocation; -} - -VkDeviceMemory Buffer::get_memory() const +Buffer::~Buffer() { - return memory; + destroy_buffer(); } VkDeviceSize Buffer::get_size() const @@ -112,36 +73,6 @@ VkDeviceSize Buffer::get_size() const return size; } -uint8_t *Buffer::map() -{ - if (!mapped && !mapped_data) - { - VK_CHECK(vmaMapMemory(device->get_memory_allocator(), allocation, reinterpret_cast<void **>(&mapped_data))); - mapped = true; - } - return mapped_data; -} - -void Buffer::unmap() -{ - if (mapped) - { - vmaUnmapMemory(device->get_memory_allocator(), allocation); - mapped_data = nullptr; - mapped = false; - } -} - -void Buffer::flush() const -{ - vmaFlushAllocation(device->get_memory_allocator(), allocation, 0, size); -} - -void Buffer::update(const std::vector<uint8_t> &data, size_t offset) -{ - update(data.data(), data.size(), offset); -} - uint64_t Buffer::get_device_address() { VkBufferDeviceAddressInfoKHR buffer_device_address_info{}; @@ -150,26 +81,5 @@ uint64_t
Buffer::get_device_address() return vkGetBufferDeviceAddressKHR(device->get_handle(), &buffer_device_address_info); } -void Buffer::update(void const *data, size_t size, size_t offset) -{ - update(reinterpret_cast(data), size, offset); -} - -void Buffer::update(const uint8_t *data, const size_t size, const size_t offset) -{ - if (persistent) - { - std::copy(data, data + size, mapped_data + offset); - flush(); - } - else - { - map(); - std::copy(data, data + size, mapped_data + offset); - flush(); - unmap(); - } -} - } // namespace core } // namespace vkb diff --git a/framework/core/buffer.h b/framework/core/buffer.h index 5ccf37d4fc..964e7a3245 100644 --- a/framework/core/buffer.h +++ b/framework/core/buffer.h @@ -19,6 +19,7 @@ #include "common/helpers.h" #include "common/vk_common.h" +#include "core/allocated.h" #include "core/vulkan_resource.h" namespace vkb @@ -27,9 +28,59 @@ class Device; namespace core { -class Buffer : public VulkanResource + +struct BufferBuilder : public AllocatedBuilder +{ + BufferBuilder(VkDeviceSize size) : + AllocatedBuilder(VkBufferCreateInfo{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, nullptr, 0, size}) + { + } + + BufferBuilder &with_flags(VkBufferCreateFlags flags) + { + create_info.flags = flags; + return *this; + } + + BufferBuilder &with_usage(VkBufferUsageFlags usage) + { + create_info.usage = usage; + return *this; + } + + BufferBuilder &with_sharing_mode(VkSharingMode sharing_mode) + { + create_info.sharingMode = sharing_mode; + return *this; + } + + BufferBuilder &with_implicit_sharing_mode() + { + if (create_info.queueFamilyIndexCount != 0) + { + create_info.sharingMode = VK_SHARING_MODE_CONCURRENT; + } + return *this; + } +}; + +class Buffer : public Allocated { public: + static Buffer create_staging_buffer(Device const &device, VkDeviceSize size, const void *data); + + template + static Buffer create_staging_buffer(Device const &device, const std::vector &data) + { + return create_staging_buffer(device, data.size() * sizeof(T), data.data()); + } + + template + static Buffer create_staging_buffer(Device const &device, const T &data) + { + return create_staging_buffer(device, sizeof(T), &data); + } + /** * @brief Creates a buffer using VMA * @param device A valid Vulkan device @@ -46,9 +97,11 @@ class Buffer : public VulkanResource &queue_family_indices = {}); + Buffer(Device const &device, const BufferBuilder &builder); + Buffer(const Buffer &) = delete; - Buffer(Buffer &&other); + Buffer(Buffer &&other) noexcept; ~Buffer(); @@ -56,6 +109,9 @@ class Buffer : public VulkanResource static std::vector copy(std::unordered_map &buffers, const char *buffer_name) { @@ -82,103 +138,18 @@ class Buffer : public VulkanResource &data, size_t offset = 0); - - template - void update(std::array const &data, size_t offset = 0) - { - update(data.data(), data.size() * sizeof(T), offset); - } - - template - void update(std::vector const &data, size_t offset = 0) - { - update(data.data(), data.size() * sizeof(T), offset); - } - - /** - * @brief Copies an object as byte data into the buffer - * @param object The object to convert into byte data - * @param offset The offset to start the copying into the mapped data - */ - template - void convert_and_update(const T &object, size_t offset = 0) - { - update(reinterpret_cast(&object), sizeof(T), offset); - } - /** * @return Return the buffer's device address (note: requires that the buffer has been created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT usage fla) */ uint64_t get_device_address(); private: - VmaAllocation 
allocation{VK_NULL_HANDLE}; - - VkDeviceMemory memory{VK_NULL_HANDLE}; - VkDeviceSize size{0}; - - uint8_t *mapped_data{nullptr}; - - /// Whether the buffer is persistently mapped or not - bool persistent{false}; - - /// Whether the buffer has been mapped with vmaMapMemory - bool mapped{false}; }; } // namespace core } // namespace vkb diff --git a/framework/core/command_buffer.h b/framework/core/command_buffer.h index e8680f0195..25c0530d39 100644 --- a/framework/core/command_buffer.h +++ b/framework/core/command_buffer.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -47,7 +47,7 @@ struct LightingState; * @brief Helper class to manage and record a command buffer, building and * keeping track of pipeline state and resource bindings */ -class CommandBuffer : public core::VulkanResource +class CommandBuffer : public core::VulkanResource { public: enum class ResetMode diff --git a/framework/core/device.h b/framework/core/device.h index d4bb7b5903..151251bd9f 100644 --- a/framework/core/device.h +++ b/framework/core/device.h @@ -1,5 +1,5 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors - * Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Arm Limited and Contributors + * Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -50,7 +50,7 @@ struct DriverVersion uint16_t patch; }; -class Device : public core::VulkanResource +class Device : public core::VulkanResource { public: /** diff --git a/framework/core/hpp_allocated.h b/framework/core/hpp_allocated.h new file mode 100644 index 0000000000..767ddf76c8 --- /dev/null +++ b/framework/core/hpp_allocated.h @@ -0,0 +1,97 @@ +/* Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2024, Bradley Austin Davis. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 the "License"; + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "allocated.h" +#include "hpp_vulkan_resource.h" + +namespace vkb +{ +namespace core +{ + +template +struct HPPAllocatedBuilder : public AllocatedBuilder +{ + private: + using Parent = AllocatedBuilder; + + protected: + HPPAllocatedBuilder(const HPPAllocatedBuilder &other) = delete; + + HPPAllocatedBuilder(const CreateInfoType &create_info) : + Parent(create_info) + {} + + public: + BuilderType &with_vma_required_flags(const vk::MemoryPropertyFlags &flags) + { + Parent::alloc_create_info.requiredFlags = flags.operator VkMemoryPropertyFlags(); + return *static_cast(this); + } + + BuilderType &with_vma_preferred_flags(const vk::MemoryPropertyFlags &flags) + { + Parent::alloc_create_info.preferredFlags = flags.operator VkMemoryPropertyFlags(); + return *static_cast(this); + } + + BuilderType &with_sharing(vk::SharingMode sharingMode) + { + Parent::create_info.sharingMode = sharingMode; + return *static_cast(this); + } +}; + +template +class HPPAllocated : public Allocated> +{ + using Parent = Allocated>; + + public: + using Parent::get_handle; + using Parent::Parent; + using Parent::update; + + /** + * @brief Copies byte data into the buffer + * @param data The data to copy from + * @param offset The offset to start the copying into the mapped data + */ + template + vk::DeviceSize update(const vk::ArrayProxy &data, size_t offset = 0) + { + return Parent::update(static_cast(data.data()), data.size() * sizeof(T), offset); + } + + /** + * @brief Copies byte data into the buffer + * @param data The data to copy from + * @param count The number of array elements + * @param offset The offset to start the copying into the mapped data + */ + template + vk::DeviceSize update_from_array(const T *data, size_t count, size_t offset = 0) + { + return update(vk::ArrayProxy{data, count}, offset); + } +}; + +} // namespace core +} // namespace vkb diff --git a/framework/core/hpp_buffer.cpp b/framework/core/hpp_buffer.cpp index 6c8744fedd..0758425630 100644 --- a/framework/core/hpp_buffer.cpp +++ b/framework/core/hpp_buffer.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -23,79 +23,56 @@ namespace vkb { namespace core { -HPPBuffer::HPPBuffer(vkb::core::HPPDevice &device, - vk::DeviceSize size_, - vk::BufferUsageFlags buffer_usage, - VmaMemoryUsage memory_usage, - VmaAllocationCreateFlags flags, - const std::vector &queue_family_indices) : - HPPVulkanResource(nullptr, &device), size(size_) -{ -#ifdef VK_USE_PLATFORM_METAL_EXT - // Workaround for Mac (MoltenVK requires unmapping https://github.com/KhronosGroup/MoltenVK/issues/175) - // Force cleares the flag VMA_ALLOCATION_CREATE_MAPPED_BIT - flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; -#endif - - persistent = (flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - vk::BufferCreateInfo buffer_create_info({}, size, buffer_usage); - if (queue_family_indices.size() >= 2) - { - buffer_create_info.sharingMode = vk::SharingMode::eConcurrent; - buffer_create_info.queueFamilyIndexCount = static_cast(queue_family_indices.size()); - buffer_create_info.pQueueFamilyIndices = queue_family_indices.data(); - } - - VmaAllocationCreateInfo memory_info{}; - memory_info.flags = flags; - memory_info.usage = memory_usage; - - VmaAllocationInfo allocation_info{}; - auto result = vmaCreateBuffer(device.get_memory_allocator(), - reinterpret_cast(&buffer_create_info), &memory_info, - reinterpret_cast(&get_handle()), &allocation, - &allocation_info); - - if (result != VK_SUCCESS) - { - throw VulkanException{result, "Cannot create HPPBuffer"}; - } - - memory = static_cast(allocation_info.deviceMemory); +HPPBuffer HPPBuffer::create_staging_buffer(HPPDevice &device, vk::DeviceSize size, const void *data) +{ + HPPBufferBuilder builder{size}; + builder + .with_usage(vk::BufferUsageFlagBits::eTransferSrc) + .with_vma_flags(VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT); - if (persistent) + HPPBuffer staging_buffer{device, builder}; + if (data != nullptr) { - mapped_data = static_cast(allocation_info.pMappedData); + staging_buffer.update(static_cast(data), size); } + return staging_buffer; } -HPPBuffer::HPPBuffer(HPPBuffer &&other) : - HPPVulkanResource{other.get_handle(), &other.get_device()}, - allocation(std::exchange(other.allocation, {})), - memory(std::exchange(other.memory, {})), - size(std::exchange(other.size, {})), - mapped_data(std::exchange(other.mapped_data, {})), - mapped(std::exchange(other.mapped, {})) +HPPBuffer::HPPBuffer(vkb::core::HPPDevice &device, + vk::DeviceSize size_, + vk::BufferUsageFlags buffer_usage, + VmaMemoryUsage memory_usage, + VmaAllocationCreateFlags flags, + const std::vector &queue_family_indices) : + HPPBuffer(device, + HPPBufferBuilder{size_} + .with_usage(buffer_usage) + .with_vma_flags(flags) + .with_vma_usage(memory_usage) + .with_queue_families(queue_family_indices) + .with_implicit_sharing_mode()) {} -HPPBuffer::~HPPBuffer() +HPPBuffer::HPPBuffer(vkb::core::HPPDevice &device, + HPPBufferBuilder const &builder) : + Parent{builder.alloc_create_info, nullptr, &device}, size(builder.create_info.size) { - if (get_handle() && (allocation != VK_NULL_HANDLE)) + get_handle() = create_buffer(builder.create_info.operator const VkBufferCreateInfo &()); + if (!builder.debug_name.empty()) { - unmap(); - vmaDestroyBuffer(get_device().get_memory_allocator(), static_cast(get_handle()), allocation); + set_debug_name(builder.debug_name); } } -VmaAllocation HPPBuffer::get_allocation() const -{ - return allocation; -} +HPPBuffer::HPPBuffer(HPPBuffer &&other) noexcept : + HPPAllocated{static_cast(other)}, + 
size(std::exchange(other.size, {})) +{} -vk::DeviceMemory HPPBuffer::get_memory() const +HPPBuffer::~HPPBuffer() { - return memory; + destroy_buffer(); } vk::DeviceSize HPPBuffer::get_size() const @@ -103,66 +80,10 @@ vk::DeviceSize HPPBuffer::get_size() const return size; } -const uint8_t *HPPBuffer::get_data() const -{ - return mapped_data; -} - -uint8_t *HPPBuffer::map() -{ - if (!mapped && !mapped_data) - { - VK_CHECK(vmaMapMemory(get_device().get_memory_allocator(), allocation, reinterpret_cast(&mapped_data))); - mapped = true; - } - return mapped_data; -} - -void HPPBuffer::unmap() -{ - if (mapped) - { - vmaUnmapMemory(get_device().get_memory_allocator(), allocation); - mapped_data = nullptr; - mapped = false; - } -} - -void HPPBuffer::flush() -{ - vmaFlushAllocation(get_device().get_memory_allocator(), allocation, 0, size); -} - -void HPPBuffer::update(const std::vector &data, size_t offset) -{ - update(data.data(), data.size(), offset); -} - uint64_t HPPBuffer::get_device_address() const { return get_device().get_handle().getBufferAddressKHR({get_handle()}); } -void HPPBuffer::update(void const *data, size_t size, size_t offset) -{ - update(reinterpret_cast(data), size, offset); -} - -void HPPBuffer::update(const uint8_t *data, const size_t size, const size_t offset) -{ - if (persistent) - { - std::copy(data, data + size, mapped_data + offset); - flush(); - } - else - { - map(); - std::copy(data, data + size, mapped_data + offset); - flush(); - unmap(); - } -} - } // namespace core } // namespace vkb diff --git a/framework/core/hpp_buffer.h b/framework/core/hpp_buffer.h index 4aa899c966..e0c490ffbc 100644 --- a/framework/core/hpp_buffer.h +++ b/framework/core/hpp_buffer.h @@ -17,18 +17,55 @@ #pragma once +#include "allocated.h" +#include "buffer.h" +#include "common/hpp_error.h" +#include "hpp_allocated.h" #include "hpp_vulkan_resource.h" - #include -#include namespace vkb { namespace core { -class HPPBuffer : public vkb::core::HPPVulkanResource + +struct HPPBufferBuilder : public HPPAllocatedBuilder +{ + private: + using Parent = HPPAllocatedBuilder; + + public: + HPPBufferBuilder(vk::DeviceSize size) : + HPPAllocatedBuilder(vk::BufferCreateInfo{{}, size}) + { + } + + HPPBufferBuilder &with_usage(vk::BufferUsageFlags usage) + { + create_info.usage = usage; + return *this; + } + + HPPBufferBuilder &with_flags(vk::BufferCreateFlags flags) + { + create_info.flags = flags; + return *this; + } +}; + +class HPPBuffer : public HPPAllocated { + using Parent = HPPAllocated; + public: + static HPPBuffer create_staging_buffer(HPPDevice &device, vk::DeviceSize size, const void *data); + + template + static HPPBuffer create_staging_buffer(HPPDevice &device, std::vector const &data) + { + return create_staging_buffer(device, data.size() * sizeof(T), data.data()); + } + /** * @brief Creates a buffer using VMA * @param device A valid Vulkan device @@ -38,25 +75,25 @@ class HPPBuffer : public vkb::core::HPPVulkanResource * @param flags The allocation create flags * @param queue_family_indices optional queue family indices */ - HPPBuffer(vkb::core::HPPDevice &device, + // [[deprecated("Use the HPPBufferBuilder ctor instead")]] + HPPBuffer(HPPDevice &device, vk::DeviceSize size, vk::BufferUsageFlags buffer_usage, VmaMemoryUsage memory_usage, VmaAllocationCreateFlags flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, const std::vector &queue_family_indices = {}); + HPPBuffer(vkb::core::HPPDevice &device, + HPPBufferBuilder const &builder); + HPPBuffer(const 
HPPBuffer &) = delete; - HPPBuffer(HPPBuffer &&other); + HPPBuffer(HPPBuffer &&other) noexcept; ~HPPBuffer(); HPPBuffer &operator=(const HPPBuffer &) = delete; HPPBuffer &operator=(HPPBuffer &&) = delete; - VmaAllocation get_allocation() const; - const uint8_t *get_data() const; - vk::DeviceMemory get_memory() const; - /** * @return Return the buffer's device address (note: requires that the buffer has been created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT usage fla) */ @@ -67,69 +104,8 @@ class HPPBuffer : public vkb::core::HPPVulkanResource */ vk::DeviceSize get_size() const; - /** - * @brief Flushes memory if it is HOST_VISIBLE and not HOST_COHERENT - */ - void flush(); - - /** - * @brief Maps vulkan memory if it isn't already mapped to an host visible address - * @return Pointer to host visible memory - */ - uint8_t *map(); - - /** - * @brief Unmaps vulkan memory from the host visible address - */ - void unmap(); - - /** - * @brief Copies byte data into the buffer - * @param data The data to copy from - * @param size The amount of bytes to copy - * @param offset The offset to start the copying into the mapped data - */ - void update(const uint8_t *data, size_t size, size_t offset = 0); - - /** - * @brief Converts any non byte data into bytes and then updates the buffer - * @param data The data to copy from - * @param size The amount of bytes to copy - * @param offset The offset to start the copying into the mapped data - */ - void update(void const *data, size_t size, size_t offset = 0); - - /** - * @brief Copies a vector of bytes into the buffer - * @param data The data vector to upload - * @param offset The offset to start the copying into the mapped data - */ - void update(const std::vector &data, size_t offset = 0); - - template - void update(std::vector const &data, size_t offset = 0) - { - update(data.data(), data.size() * sizeof(T), offset); - } - - /** - * @brief Copies an object as byte data into the buffer - * @param object The object to convert into byte data - * @param offset The offset to start the copying into the mapped data - */ - template - void convert_and_update(const T &object, size_t offset = 0) - { - update(reinterpret_cast(&object), sizeof(T), offset); - } - private: - VmaAllocation allocation = VK_NULL_HANDLE; - vk::DeviceMemory memory = nullptr; - vk::DeviceSize size = 0; - uint8_t *mapped_data = nullptr; - bool persistent = false; // Whether the buffer is persistently mapped or not - bool mapped = false; // Whether the buffer has been mapped with vmaMapMemory + vk::DeviceSize size = 0; }; } // namespace core } // namespace vkb diff --git a/framework/core/hpp_device.cpp b/framework/core/hpp_device.cpp index 2e4c49b14d..1fea09e9b6 100644 --- a/framework/core/hpp_device.cpp +++ b/framework/core/hpp_device.cpp @@ -185,9 +185,10 @@ HPPDevice::HPPDevice(vkb::core::HPPPhysicalDevice &gpu, vma_vulkan_func.vkGetDeviceProcAddr = reinterpret_cast(get_handle().getProcAddr("vkGetDeviceProcAddr")); VmaAllocatorCreateInfo allocator_info{}; - allocator_info.physicalDevice = static_cast(gpu.get_handle()); - allocator_info.device = static_cast(get_handle()); - allocator_info.instance = static_cast(gpu.get_instance().get_handle()); + allocator_info.pVulkanFunctions = &vma_vulkan_func; + allocator_info.physicalDevice = static_cast(gpu.get_handle()); + allocator_info.device = static_cast(get_handle()); + allocator_info.instance = static_cast(gpu.get_instance().get_handle()); if (can_get_memory_requirements && has_dedicated_allocation) { @@ -219,8 +220,6 @@ 
HPPDevice::HPPDevice(vkb::core::HPPPhysicalDevice &gpu, allocator_info.flags |= VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT; } - allocator_info.pVulkanFunctions = &vma_vulkan_func; - VkResult result = vmaCreateAllocator(&allocator_info, &memory_allocator); if (result != VK_SUCCESS) diff --git a/framework/core/hpp_image.cpp b/framework/core/hpp_image.cpp index 5e6f27363f..1aa14f1467 100644 --- a/framework/core/hpp_image.cpp +++ b/framework/core/hpp_image.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -55,48 +55,27 @@ HPPImage::HPPImage(HPPDevice &device, vk::ImageCreateFlags flags, uint32_t num_queue_families, const uint32_t *queue_families) : - HPPVulkanResource{nullptr, &device}, - type{find_image_type(extent)}, - extent{extent}, - format{format}, - sample_count{sample_count}, - usage{image_usage}, - array_layer_count{array_layers}, - tiling{tiling} -{ - assert(0 < mip_levels && "HPPImage should have at least one level"); - assert(0 < array_layers && "HPPImage should have at least one layer"); - - subresource.mipLevel = mip_levels; - subresource.arrayLayer = array_layers; - - vk::ImageCreateInfo image_info(flags, type, format, extent, mip_levels, array_layers, sample_count, tiling, image_usage); - - if (num_queue_families != 0) - { - image_info.sharingMode = vk::SharingMode::eConcurrent; - image_info.queueFamilyIndexCount = num_queue_families; - image_info.pQueueFamilyIndices = queue_families; - } - - VmaAllocationCreateInfo memory_info{}; - memory_info.usage = memory_usage; - - if (image_usage & vk::ImageUsageFlagBits::eTransientAttachment) - { - memory_info.preferredFlags = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; - } - - auto result = vmaCreateImage(device.get_memory_allocator(), - reinterpret_cast(&image_info), - &memory_info, - const_cast(reinterpret_cast(&get_handle())), - &memory, - nullptr); - - if (result != VK_SUCCESS) + HPPImage{device, + HPPImageBuilder{extent} + .with_format(format) + .with_mip_levels(mip_levels) + .with_array_layers(array_layers) + .with_sample_count(sample_count) + .with_tiling(tiling) + .with_flags(flags) + .with_usage(image_usage) + .with_queue_families(num_queue_families, queue_families)} +{} + +HPPImage::HPPImage(HPPDevice &device, HPPImageBuilder const &builder) : + HPPAllocated{builder.alloc_create_info, nullptr, &device}, create_info{builder.create_info} +{ + get_handle() = create_image(create_info.operator const VkImageCreateInfo &()); + subresource.arrayLayer = create_info.arrayLayers; + subresource.mipLevel = create_info.mipLevels; + if (!builder.debug_name.empty()) { - throw VulkanException{result, "Cannot create HPPImage"}; + set_debug_name(builder.debug_name); } } @@ -106,25 +85,23 @@ HPPImage::HPPImage(HPPDevice &device, vk::Format format, vk::ImageUsageFlags image_usage, vk::SampleCountFlagBits sample_count) : - HPPVulkanResource{handle, &device}, type{find_image_type(extent)}, extent{extent}, format{format}, sample_count{sample_count}, usage{image_usage} + HPPAllocated{handle, &device} { - subresource.mipLevel = 1; - subresource.arrayLayer = 1; + create_info.samples = sample_count; + create_info.format = format; + create_info.extent = extent; + create_info.imageType = find_image_type(extent); + create_info.arrayLayers = 1; + create_info.mipLevels = 1; + subresource.mipLevel = 1; + subresource.arrayLayer = 1; } -HPPImage::HPPImage(HPPImage &&other) : - 
HPPVulkanResource{std::move(other)}, - memory(std::exchange(other.memory, {})), - type(std::exchange(other.type, {})), - extent(std::exchange(other.extent, {})), - format(std::exchange(other.format, {})), - sample_count(std::exchange(other.sample_count, {})), - usage(std::exchange(other.usage, {})), - tiling(std::exchange(other.tiling, {})), +HPPImage::HPPImage(HPPImage &&other) noexcept : + HPPAllocated{std::move(other)}, + create_info(std::exchange(other.create_info, {})), subresource(std::exchange(other.subresource, {})), - views(std::exchange(other.views, {})), - mapped_data(std::exchange(other.mapped_data, {})), - mapped(std::exchange(other.mapped, {})) + views(std::exchange(other.views, {})) { // Update image views references to this image to avoid dangling pointers for (auto &view : views) @@ -138,67 +115,47 @@ HPPImage::~HPPImage() if (get_handle() && memory) { unmap(); - vmaDestroyImage(get_device().get_memory_allocator(), static_cast(get_handle()), memory); + vmaDestroyImage(get_device().get_memory_allocator(), static_cast(get_handle()), allocation); } } -VmaAllocation HPPImage::get_memory() const -{ - return memory; -} - uint8_t *HPPImage::map() { - if (!mapped_data) - { - if (tiling != vk::ImageTiling::eLinear) - { - LOGW("Mapping image memory that is not linear"); - } - VK_CHECK(vmaMapMemory(get_device().get_memory_allocator(), memory, reinterpret_cast(&mapped_data))); - mapped = true; - } - return mapped_data; -} - -void HPPImage::unmap() -{ - if (mapped) + if (create_info.tiling != vk::ImageTiling::eLinear) { - vmaUnmapMemory(get_device().get_memory_allocator(), memory); - mapped_data = nullptr; - mapped = false; + LOGW("Mapping image memory that is not linear"); } + return Allocated::map(); } vk::ImageType HPPImage::get_type() const { - return type; + return create_info.imageType; } const vk::Extent3D &HPPImage::get_extent() const { - return extent; + return create_info.extent; } vk::Format HPPImage::get_format() const { - return format; + return create_info.format; } vk::SampleCountFlagBits HPPImage::get_sample_count() const { - return sample_count; + return create_info.samples; } vk::ImageUsageFlags HPPImage::get_usage() const { - return usage; + return create_info.usage; } vk::ImageTiling HPPImage::get_tiling() const { - return tiling; + return create_info.tiling; } vk::ImageSubresource HPPImage::get_subresource() const @@ -208,7 +165,7 @@ vk::ImageSubresource HPPImage::get_subresource() const uint32_t HPPImage::get_array_layer_count() const { - return array_layer_count; + return create_info.arrayLayers; } std::unordered_set &HPPImage::get_views() diff --git a/framework/core/hpp_image.h b/framework/core/hpp_image.h index 39277d6a1c..93708e9420 100644 --- a/framework/core/hpp_image.h +++ b/framework/core/hpp_image.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -18,8 +18,8 @@ #pragma once #include "core/hpp_vulkan_resource.h" +#include "hpp_allocated.h" #include -#include namespace vkb { @@ -28,7 +28,82 @@ namespace core class HPPDevice; class HPPImageView; -class HPPImage : public vkb::core::HPPVulkanResource +struct HPPImageBuilder : public HPPAllocatedBuilder +{ + private: + using Parent = HPPAllocatedBuilder; + + public: + HPPImageBuilder(vk::Extent3D const &extent) : + // Better reasonable defaults than vk::ImageCreateInfo default ctor + Parent(vk::ImageCreateInfo{{}, vk::ImageType::e2D, vk::Format::eR8G8B8A8Unorm, extent, 1, 1}) + { + } + + HPPImageBuilder(vk::Extent2D const &extent) : + HPPImageBuilder(vk::Extent3D{extent.width, extent.height, 1}) + { + } + + HPPImageBuilder &with_format(vk::Format format) + { + create_info.format = format; + return *this; + } + + HPPImageBuilder &with_image_type(vk::ImageType type) + { + create_info.imageType = type; + return *this; + } + + HPPImageBuilder &with_array_layers(uint32_t layers) + { + create_info.arrayLayers = layers; + return *this; + } + + HPPImageBuilder &with_mip_levels(uint32_t levels) + { + create_info.mipLevels = levels; + return *this; + } + + HPPImageBuilder &with_sample_count(vk::SampleCountFlagBits sample_count) + { + create_info.samples = sample_count; + return *this; + } + + HPPImageBuilder &with_tiling(vk::ImageTiling tiling) + { + create_info.tiling = tiling; + return *this; + } + + HPPImageBuilder &with_usage(vk::ImageUsageFlags usage) + { + create_info.usage = usage; + return *this; + } + + HPPImageBuilder &with_flags(vk::ImageCreateFlags flags) + { + create_info.flags = flags; + return *this; + } + + HPPImageBuilder &with_implicit_sharing_mode() + { + if (create_info.queueFamilyIndexCount != 0) + { + create_info.sharingMode = vk::SharingMode::eConcurrent; + } + return *this; + } +}; + +class HPPImage : public HPPAllocated { public: HPPImage(HPPDevice &device, @@ -38,11 +113,12 @@ class HPPImage : public vkb::core::HPPVulkanResource vk::ImageUsageFlags image_usage, vk::SampleCountFlagBits sample_count = vk::SampleCountFlagBits::e1); + // [[deprecated("Use the HPPImageBuilder ctor instead")]] HPPImage(HPPDevice &device, const vk::Extent3D &extent, vk::Format format, vk::ImageUsageFlags image_usage, - VmaMemoryUsage memory_usage, + VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_AUTO, vk::SampleCountFlagBits sample_count = vk::SampleCountFlagBits::e1, uint32_t mip_levels = 1, uint32_t array_layers = 1, @@ -51,29 +127,25 @@ class HPPImage : public vkb::core::HPPVulkanResource uint32_t num_queue_families = 0, const uint32_t *queue_families = nullptr); + HPPImage(HPPDevice &device, + HPPImageBuilder const &builder); + HPPImage(const HPPImage &) = delete; - HPPImage(HPPImage &&other); + HPPImage(HPPImage &&other) noexcept; - ~HPPImage() override; + ~HPPImage(); HPPImage &operator=(const HPPImage &) = delete; HPPImage &operator=(HPPImage &&) = delete; - VmaAllocation get_memory() const; - /** * @brief Maps vulkan memory to an host visible address * @return Pointer to host visible memory */ uint8_t *map(); - /** - * @brief Unmaps vulkan memory from the host visible address - */ - void unmap(); - vk::ImageType get_type() const; const vk::Extent3D &get_extent() const; vk::Format get_format() const; @@ -85,18 +157,9 @@ class HPPImage : public vkb::core::HPPVulkanResource std::unordered_set &get_views(); private: - VmaAllocation memory = VK_NULL_HANDLE; - vk::ImageType type; - vk::Extent3D extent; - vk::Format format; - vk::ImageUsageFlags 
usage; - vk::SampleCountFlagBits sample_count; - vk::ImageTiling tiling; + vk::ImageCreateInfo create_info; vk::ImageSubresource subresource; - uint32_t array_layer_count = 0; std::unordered_set views; /// HPPImage views referring to this image - uint8_t *mapped_data = nullptr; - bool mapped = false; /// Whether it was mapped with vmaMapMemory }; } // namespace core } // namespace vkb diff --git a/framework/core/image.cpp b/framework/core/image.cpp index 81878d06cf..af37330369 100644 --- a/framework/core/image.cpp +++ b/framework/core/image.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -67,6 +67,7 @@ inline VkImageType find_image_type(VkExtent3D extent) namespace core { + Image::Image(Device const &device, const VkExtent3D &extent, VkFormat format, @@ -79,88 +80,53 @@ Image::Image(Device const &device, VkImageCreateFlags flags, uint32_t num_queue_families, const uint32_t *queue_families) : - VulkanResource{VK_NULL_HANDLE, &device}, - type{find_image_type(extent)}, - extent{extent}, - format{format}, - sample_count{sample_count}, - usage{image_usage}, - array_layer_count{array_layers}, - tiling{tiling} -{ - assert(mip_levels > 0 && "Image should have at least one level"); - assert(array_layers > 0 && "Image should have at least one layer"); - - subresource.mipLevel = mip_levels; - subresource.arrayLayer = array_layers; - - VkImageCreateInfo image_info{VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO}; - image_info.flags = flags; - image_info.imageType = type; - image_info.format = format; - image_info.extent = extent; - image_info.mipLevels = mip_levels; - image_info.arrayLayers = array_layers; - image_info.samples = sample_count; - image_info.tiling = tiling; - image_info.usage = image_usage; - - if (num_queue_families != 0) - { - image_info.sharingMode = VK_SHARING_MODE_CONCURRENT; - image_info.queueFamilyIndexCount = num_queue_families; - image_info.pQueueFamilyIndices = queue_families; - } - - VmaAllocationCreateInfo memory_info{}; - memory_info.usage = memory_usage; - - if (image_usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) - { - memory_info.preferredFlags = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; - } - - auto result = vmaCreateImage(device.get_memory_allocator(), - &image_info, &memory_info, - &handle, &memory, - nullptr); - - if (result != VK_SUCCESS) + // Pass through to the ImageBuilder ctor + Image(device, + ImageBuilder(extent) + .with_format(format) + .with_image_type(find_image_type(extent)) + .with_usage(image_usage) + .with_mip_levels(mip_levels) + .with_array_layers(array_layers) + .with_tiling(tiling) + .with_flags(flags) + .with_vma_usage(memory_usage) + .with_sample_count(sample_count) + .with_queue_families(num_queue_families, queue_families) + .with_implicit_sharing_mode()) +{ +} + +Image::Image(Device const &device, ImageBuilder const &builder) : + Allocated{builder.alloc_create_info, VK_NULL_HANDLE, &device}, create_info(builder.create_info) +{ + handle = create_image(create_info); + subresource.arrayLayer = create_info.arrayLayers; + subresource.mipLevel = create_info.mipLevels; + if (!builder.debug_name.empty()) { - throw VulkanException{result, "Cannot create Image"}; + set_debug_name(builder.debug_name); } } Image::Image(Device const &device, VkImage handle, const VkExtent3D &extent, VkFormat format, VkImageUsageFlags image_usage, VkSampleCountFlagBits sample_count) : - VulkanResource{handle, &device}, - type{find_image_type(extent)}, - 
extent{extent}, - format{format}, - sample_count{sample_count}, - usage{image_usage} -{ - subresource.mipLevel = 1; - subresource.arrayLayer = 1; + Allocated{handle, &device} +{ + create_info.extent = extent; + create_info.imageType = find_image_type(extent); + create_info.format = format; + create_info.samples = sample_count; + create_info.usage = image_usage; + subresource.arrayLayer = create_info.arrayLayers = 1; + subresource.mipLevel = create_info.mipLevels = 1; } -Image::Image(Image &&other) : - VulkanResource{std::move(other)}, - memory{other.memory}, - type{other.type}, - extent{other.extent}, - format{other.format}, - sample_count{other.sample_count}, - usage{other.usage}, - tiling{other.tiling}, - subresource{other.subresource}, - views(std::exchange(other.views, {})), - mapped_data{other.mapped_data}, - mapped{other.mapped} -{ - other.memory = VK_NULL_HANDLE; - other.mapped_data = nullptr; - other.mapped = false; - +Image::Image(Image &&other) noexcept : + Allocated{std::move(other)}, + create_info{std::exchange(other.create_info, {})}, + subresource{std::exchange(other.subresource, {})}, + views(std::exchange(other.views, {})) +{ // Update image views references to this image to avoid dangling pointers for (auto &view : views) { @@ -170,80 +136,47 @@ Image::Image(Image &&other) : Image::~Image() { - if (handle != VK_NULL_HANDLE && memory != VK_NULL_HANDLE) - { - unmap(); - vmaDestroyImage(device->get_memory_allocator(), handle, memory); - } -} - -VmaAllocation Image::get_memory() const -{ - return memory; -} - -uint8_t *Image::map() -{ - if (!mapped_data) - { - if (tiling != VK_IMAGE_TILING_LINEAR) - { - LOGW("Mapping image memory that is not linear"); - } - VK_CHECK(vmaMapMemory(device->get_memory_allocator(), memory, reinterpret_cast(&mapped_data))); - mapped = true; - } - return mapped_data; -} - -void Image::unmap() -{ - if (mapped) - { - vmaUnmapMemory(device->get_memory_allocator(), memory); - mapped_data = nullptr; - mapped = false; - } + destroy_image(); } VkImageType Image::get_type() const { - return type; + return create_info.imageType; } const VkExtent3D &Image::get_extent() const { - return extent; + return create_info.extent; } VkFormat Image::get_format() const { - return format; + return create_info.format; } VkSampleCountFlagBits Image::get_sample_count() const { - return sample_count; + return create_info.samples; } VkImageUsageFlags Image::get_usage() const { - return usage; + return create_info.usage; } VkImageTiling Image::get_tiling() const { - return tiling; + return create_info.tiling; } -VkImageSubresource Image::get_subresource() const +const VkImageSubresource &Image::get_subresource() const { return subresource; } uint32_t Image::get_array_layer_count() const { - return array_layer_count; + return create_info.arrayLayers; } std::unordered_set &Image::get_views() diff --git a/framework/core/image.h b/framework/core/image.h index 53af4ce668..24e4f74e7e 100644 --- a/framework/core/image.h +++ b/framework/core/image.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2021, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -21,6 +21,7 @@ #include "common/helpers.h" #include "common/vk_common.h" +#include "core/allocated.h" #include "core/vulkan_resource.h" namespace vkb @@ -29,33 +30,114 @@ class Device; namespace core { + +struct ImageBuilder : public AllocatedBuilder +{ + ImageBuilder(VkExtent3D const &extent) : + 
AllocatedBuilder(VkImageCreateInfo{VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr}) + { + create_info.extent = extent; + create_info.arrayLayers = 1; + create_info.mipLevels = 1; + create_info.imageType = VK_IMAGE_TYPE_2D; + create_info.format = VK_FORMAT_R8G8B8A8_UNORM; + } + + ImageBuilder &with_format(VkFormat format) + { + create_info.format = format; + return *this; + } + + ImageBuilder &with_usage(VkImageUsageFlags usage) + { + create_info.usage = usage; + return *this; + } + + ImageBuilder &with_sharing_mode(VkSharingMode sharing_mode) + { + create_info.sharingMode = sharing_mode; + return *this; + } + + ImageBuilder &with_flags(VkImageCreateFlags flags) + { + create_info.flags = flags; + return *this; + } + + ImageBuilder &with_image_type(VkImageType type) + { + create_info.imageType = type; + return *this; + } + + ImageBuilder &with_array_layers(uint32_t layers) + { + create_info.arrayLayers = layers; + return *this; + } + + ImageBuilder &with_mip_levels(uint32_t levels) + { + create_info.mipLevels = levels; + return *this; + } + + ImageBuilder &with_sample_count(VkSampleCountFlagBits sample_count) + { + create_info.samples = sample_count; + return *this; + } + + ImageBuilder &with_tiling(VkImageTiling tiling) + { + create_info.tiling = tiling; + return *this; + } + + ImageBuilder &with_implicit_sharing_mode() + { + if (create_info.queueFamilyIndexCount != 0) + { + create_info.sharingMode = VK_SHARING_MODE_CONCURRENT; + } + return *this; + } +}; + class ImageView; -class Image : public VulkanResource +class Image : public vkb::core::Allocated { + VkImageCreateInfo create_info; + public: - Image(Device const & device, + Image(Device const &device, VkImage handle, - const VkExtent3D & extent, + const VkExtent3D &extent, VkFormat format, VkImageUsageFlags image_usage, VkSampleCountFlagBits sample_count = VK_SAMPLE_COUNT_1_BIT); - Image(Device const & device, - const VkExtent3D & extent, + Image(Device const &device, + const VkExtent3D &extent, VkFormat format, VkImageUsageFlags image_usage, - VmaMemoryUsage memory_usage, + VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_AUTO, VkSampleCountFlagBits sample_count = VK_SAMPLE_COUNT_1_BIT, uint32_t mip_levels = 1, uint32_t array_layers = 1, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL, VkImageCreateFlags flags = 0, uint32_t num_queue_families = 0, - const uint32_t * queue_families = nullptr); + const uint32_t *queue_families = nullptr); + Image(Device const &device, + ImageBuilder const &builder); Image(const Image &) = delete; - Image(Image &&other); + Image(Image &&other) noexcept; ~Image() override; @@ -63,19 +145,6 @@ class Image : public VulkanResource Image &operator=(Image &&) = delete; - VmaAllocation get_memory() const; - - /** - * @brief Maps vulkan memory to an host visible address - * @return Pointer to host visible memory - */ - uint8_t *map(); - - /** - * @brief Unmaps vulkan memory from the host visible address - */ - void unmap(); - VkImageType get_type() const; const VkExtent3D &get_extent() const; @@ -88,38 +157,16 @@ class Image : public VulkanResource VkImageTiling get_tiling() const; - VkImageSubresource get_subresource() const; + const VkImageSubresource &get_subresource() const; uint32_t get_array_layer_count() const; std::unordered_set &get_views(); private: - VmaAllocation memory{VK_NULL_HANDLE}; - - VkImageType type{}; - - VkExtent3D extent{}; - - VkFormat format{}; - - VkImageUsageFlags usage{}; - - VkSampleCountFlagBits sample_count{}; - - VkImageTiling tiling{}; - - VkImageSubresource subresource{}; - - 
uint32_t array_layer_count{0}; - /// Image views referring to this image std::unordered_set views; - - uint8_t *mapped_data{nullptr}; - - /// Whether it was mapped with vmaMapMemory - bool mapped{false}; + VkImageSubresource subresource{}; }; } // namespace core } // namespace vkb diff --git a/framework/core/image_view.h b/framework/core/image_view.h index e6eb593eda..9b5a10e9fe 100644 --- a/framework/core/image_view.h +++ b/framework/core/image_view.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2021, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -26,7 +26,7 @@ namespace vkb { namespace core { -class ImageView : public VulkanResource +class ImageView : public VulkanResource { public: ImageView(Image &image, VkImageViewType view_type, VkFormat format = VK_FORMAT_UNDEFINED, diff --git a/framework/core/render_pass.h b/framework/core/render_pass.h index f83290d50d..8c60321acb 100644 --- a/framework/core/render_pass.h +++ b/framework/core/render_pass.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2021, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -43,13 +43,13 @@ struct SubpassInfo std::string debug_name; }; -class RenderPass : public core::VulkanResource +class RenderPass : public core::VulkanResource { public: - RenderPass(Device & device, - const std::vector & attachments, + RenderPass(Device &device, + const std::vector &attachments, const std::vector &load_store_infos, - const std::vector & subpasses); + const std::vector &subpasses); RenderPass(const RenderPass &) = delete; diff --git a/framework/core/sampler.h b/framework/core/sampler.h index e79c253954..66a099d64c 100644 --- a/framework/core/sampler.h +++ b/framework/core/sampler.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2021, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -30,7 +30,7 @@ namespace core /** * @brief Represents a Vulkan Sampler */ -class Sampler : public VulkanResource +class Sampler : public VulkanResource { public: /** diff --git a/framework/core/shader_binding_table.h b/framework/core/shader_binding_table.h index c822d2e931..023b5a5954 100644 --- a/framework/core/shader_binding_table.h +++ b/framework/core/shader_binding_table.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Sascha Willems +/* Copyright (c) 2021-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -39,10 +39,10 @@ class ShaderBindingTable * @param handle_size_aligned Aligned shader group handle size * @param memory_usage The memory usage of the shader binding table */ - ShaderBindingTable(Device & device, + ShaderBindingTable(Device &device, uint32_t handle_count, VkDeviceSize handle_size_aligned, - VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU); + VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_AUTO); ~ShaderBindingTable(); diff --git a/framework/core/vulkan_resource.h b/framework/core/vulkan_resource.h index e20f4b6a43..8d8b06a632 100644 --- a/framework/core/vulkan_resource.h +++ b/framework/core/vulkan_resource.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2021, Arm Limited and Contributors +/* Copyright (c) 2021-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -33,11 +33,60 @@ namespace detail void set_debug_name(const Device *device, VkObjectType object_type, uint64_t handle, const char *debug_name); } +template +constexpr VkObjectType get_object_type(const HandleType 
&handle) +{ + throw std::runtime_error("Unknown handle type"); + return static_cast(-1); +} + +template <> +constexpr VkObjectType get_object_type(const VkImage &handle) +{ + return VK_OBJECT_TYPE_IMAGE; +} + +template <> +constexpr VkObjectType get_object_type(const VkImageView &handle) +{ + return VK_OBJECT_TYPE_IMAGE_VIEW; +} + +template <> +constexpr VkObjectType get_object_type(const VkRenderPass &handle) +{ + return VK_OBJECT_TYPE_RENDER_PASS; +} + +template <> +constexpr VkObjectType get_object_type(const VkSampler &handle) +{ + return VK_OBJECT_TYPE_SAMPLER; +} + +template <> +constexpr VkObjectType get_object_type(const VkBuffer &handle) +{ + return VK_OBJECT_TYPE_BUFFER; +} + +template <> +constexpr VkObjectType get_object_type(const VkDevice &handle) +{ + return VK_OBJECT_TYPE_DEVICE; +} + +template <> +constexpr VkObjectType get_object_type(const VkCommandBuffer &handle) +{ + return VK_OBJECT_TYPE_COMMAND_BUFFER; +} + /// Inherit this for any Vulkan object with a handle of type `THandle`. /// /// This allows the derived class to store a Vulkan handle, and also a pointer to the parent Device. /// It also allow for adding debug data to any Vulkan object. -template +template class VulkanResource { public: @@ -46,7 +95,7 @@ class VulkanResource { } - VulkanResource(const VulkanResource &) = delete; + VulkanResource(const VulkanResource &) = delete; VulkanResource &operator=(const VulkanResource &) = delete; VulkanResource(VulkanResource &&other) : @@ -70,9 +119,9 @@ class VulkanResource virtual ~VulkanResource() = default; - inline VkObjectType get_object_type() const + constexpr VkObjectType get_object_type() const { - return OBJECT_TYPE; + return vkb::core::get_object_type(handle); } inline Device &get_device() const @@ -104,12 +153,12 @@ class VulkanResource inline void set_debug_name(const std::string &name) { debug_name = name; - detail::set_debug_name(device, OBJECT_TYPE, get_handle_u64(), debug_name.c_str()); + detail::set_debug_name(device, get_object_type(), get_handle_u64(), debug_name.c_str()); } protected: THandle handle; - Device * device; + Device *device; std::string debug_name; }; diff --git a/framework/gltf_loader.cpp b/framework/gltf_loader.cpp index 6e6eddc27d..6765c4d9bf 100644 --- a/framework/gltf_loader.cpp +++ b/framework/gltf_loader.cpp @@ -1,5 +1,5 @@ -/* Copyright (c) 2018-2023, Arm Limited and Contributors - * Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2018-2024, Arm Limited and Contributors + * Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -349,7 +349,9 @@ inline void prepare_meshlets(std::vector &meshlets, std::unique_ptr(index_data.data()) + i); if (vertices.insert(meshlet.indices[meshlet.index_count]).second) + { ++meshlet.vertex_count; + } meshlet.index_count++; triangle_check = triangle_check < 3 ? 
++triangle_check : 1; @@ -358,7 +360,9 @@ inline void prepare_meshlets(std::vector &meshlets, std::unique_ptrvertex_indices - 1) { if (i == submesh->vertex_indices - 1) + { assert(triangle_check == 3); + } uint32_t counter = 0; for (auto v : vertices) @@ -587,15 +591,10 @@ sg::Scene GLTFLoader::load_scene(int scene_index) auto &image = image_components[image_index]; - core::Buffer stage_buffer{device, - image->get_data().size(), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, image->get_data()); batch_size += image->get_data().size(); - stage_buffer.update(image->get_data()); - upload_image_to_gpu(command_buffer, stage_buffer, *image); transient_buffers.push_back(std::move(stage_buffer)); @@ -735,6 +734,7 @@ sg::Scene GLTFLoader::load_scene(int scene_index) submesh->vertices_count = to_u32(model.accessors[attribute.second].count); } + // FIXME this is low performance as it renders from a host visible buffer core::Buffer buffer{device, vertex_data.size(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, @@ -1146,12 +1146,7 @@ std::unique_ptr GLTFLoader::load_model(uint32_t index, bool storage aligned_vertex_data.push_back(vert); } - core::Buffer stage_buffer{device, - aligned_vertex_data.size() * sizeof(AlignedVertex), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(aligned_vertex_data.data(), aligned_vertex_data.size() * sizeof(AlignedVertex)); + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, aligned_vertex_data); core::Buffer buffer{device, aligned_vertex_data.size() * sizeof(AlignedVertex), @@ -1179,12 +1174,7 @@ std::unique_ptr GLTFLoader::load_model(uint32_t index, bool storage vertex_data.push_back(vert); } - core::Buffer stage_buffer{device, - vertex_data.size() * sizeof(Vertex), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(vertex_data.data(), vertex_data.size() * sizeof(Vertex)); + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, vertex_data); core::Buffer buffer{device, vertex_data.size() * sizeof(Vertex), @@ -1239,14 +1229,9 @@ std::unique_ptr GLTFLoader::load_model(uint32_t index, bool storage prepare_meshlets(meshlets, submesh, index_data); // vertex_indices and index_buffer are used for meshlets now - submesh->vertex_indices = (uint32_t) meshlets.size(); + submesh->vertex_indices = static_cast(meshlets.size()); - core::Buffer stage_buffer{device, - meshlets.size() * sizeof(Meshlet), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(meshlets.data(), meshlets.size() * sizeof(Meshlet)); + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, meshlets); submesh->index_buffer = std::make_unique(device, meshlets.size() * sizeof(Meshlet), @@ -1259,12 +1244,7 @@ std::unique_ptr GLTFLoader::load_model(uint32_t index, bool storage } else { - core::Buffer stage_buffer{device, - index_data.size(), - VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(index_data); + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, index_data); submesh->index_buffer = std::make_unique(device, index_data.size(), diff --git a/framework/gui.cpp b/framework/gui.cpp index 1c9b687778..bce5e83786 100644 --- a/framework/gui.cpp +++ b/framework/gui.cpp @@ -1,5 +1,5 @@ -/* Copyright (c) 2018-2023, Arm Limited and Contributors - * Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 
2018-2024, Arm Limited and Contributors + * Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -49,10 +49,10 @@ namespace vkb { namespace { -void upload_draw_data(ImDrawData *draw_data, const uint8_t *vertex_data, const uint8_t *index_data) +void upload_draw_data(const ImDrawData *draw_data, uint8_t *vertex_data, uint8_t *index_data) { - ImDrawVert *vtx_dst = (ImDrawVert *) vertex_data; - ImDrawIdx *idx_dst = (ImDrawIdx *) index_data; + ImDrawVert *vtx_dst = reinterpret_cast(vertex_data); + ImDrawIdx *idx_dst = reinterpret_cast(index_data); for (int n = 0; n < draw_data->CmdListsCount; n++) { @@ -177,8 +177,7 @@ Gui::Gui(VulkanSample &sample_, const Window &window, const Stats *stats, // Upload font data into the vulkan image memory { - core::Buffer stage_buffer{device, upload_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY, 0}; - stage_buffer.update({font_data, font_data + upload_size}); + core::Buffer stage_buffer = core::Buffer::create_staging_buffer(device, upload_size, font_data); auto &command_buffer = device.request_command_buffer(); @@ -1141,7 +1140,9 @@ bool Drawer::radio_button(const char *caption, int32_t *selectedOption, const in { bool res = ImGui::RadioButton(caption, selectedOption, elementOption); if (res) + { dirty = true; + } return res; } diff --git a/framework/hpp_api_vulkan_sample.cpp b/framework/hpp_api_vulkan_sample.cpp index a16212b1d1..cdd1b23528 100644 --- a/framework/hpp_api_vulkan_sample.cpp +++ b/framework/hpp_api_vulkan_sample.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -838,9 +838,7 @@ HPPTexture HPPApiVulkanSample::load_texture(const std::string &file, vkb::scene_ vk::CommandBuffer command_buffer = get_device()->create_command_buffer(vk::CommandBufferLevel::ePrimary, true); - vkb::core::HPPBuffer stage_buffer{*get_device(), texture.image->get_data().size(), vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::HPPBuffer stage_buffer = vkb::core::HPPBuffer::create_staging_buffer(*get_device(), texture.image->get_data()); // Setup buffer copy regions for each mip level std::vector bufferCopyRegions; @@ -898,9 +896,7 @@ HPPTexture HPPApiVulkanSample::load_texture_array(const std::string &file, vkb:: vk::CommandBuffer command_buffer = get_device()->create_command_buffer(vk::CommandBufferLevel::ePrimary, true); - vkb::core::HPPBuffer stage_buffer{*get_device(), texture.image->get_data().size(), vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::HPPBuffer stage_buffer = vkb::core::HPPBuffer::create_staging_buffer(*get_device(), texture.image->get_data()); // Setup buffer copy regions for each mip level std::vector buffer_copy_regions; @@ -964,9 +960,7 @@ HPPTexture HPPApiVulkanSample::load_texture_cubemap(const std::string &file, vkb vk::CommandBuffer command_buffer = get_device()->create_command_buffer(vk::CommandBufferLevel::ePrimary, true); - vkb::core::HPPBuffer stage_buffer{*get_device(), texture.image->get_data().size(), vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY}; - - stage_buffer.update(texture.image->get_data()); + vkb::core::HPPBuffer stage_buffer = vkb::core::HPPBuffer::create_staging_buffer(*get_device(), texture.image->get_data()); // Setup buffer copy regions for 
each mip level std::vector buffer_copy_regions; diff --git a/framework/hpp_buffer_pool.h b/framework/hpp_buffer_pool.h index ed1bf17d97..6a186ee9c1 100644 --- a/framework/hpp_buffer_pool.h +++ b/framework/hpp_buffer_pool.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -80,7 +80,7 @@ class HPPBufferPool : private vkb::BufferPool using vkb::BufferPool::reset; HPPBufferPool( - vkb::core::HPPDevice &device, vk::DeviceSize block_size, vk::BufferUsageFlags usage, VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU) : + vkb::core::HPPDevice &device, vk::DeviceSize block_size, vk::BufferUsageFlags usage, VmaMemoryUsage memory_usage = VMA_MEMORY_USAGE_AUTO) : vkb::BufferPool( reinterpret_cast(device), static_cast(block_size), static_cast(usage), memory_usage) { diff --git a/framework/hpp_gui.cpp b/framework/hpp_gui.cpp index f518576626..626773e941 100644 --- a/framework/hpp_gui.cpp +++ b/framework/hpp_gui.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -26,10 +26,10 @@ namespace vkb { namespace { -void upload_draw_data(const ImDrawData *draw_data, const uint8_t *vertex_data, const uint8_t *index_data) +void upload_draw_data(const ImDrawData *draw_data, uint8_t *vertex_data, uint8_t *index_data) { - ImDrawVert *vtx_dst = (ImDrawVert *) vertex_data; - ImDrawIdx *idx_dst = (ImDrawIdx *) index_data; + ImDrawVert *vtx_dst = reinterpret_cast(vertex_data); + ImDrawIdx *idx_dst = reinterpret_cast(index_data); for (int n = 0; n < draw_data->CmdListsCount; n++) { @@ -133,9 +133,10 @@ HPPGui::HPPGui(HPPVulkanSample &sample_, const vkb::Window &window, const vkb::s // Create target image for copy vk::Extent3D font_extent(to_u32(tex_width), to_u32(tex_height), 1u); - font_image = std::make_unique(device, font_extent, vk::Format::eR8G8B8A8Unorm, - vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst, - VMA_MEMORY_USAGE_GPU_ONLY); + vkb::core::HPPImageBuilder image_builder(font_extent); + image_builder.with_format(vk::Format::eR8G8B8A8Unorm); + image_builder.with_usage(vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst); + font_image = std::make_unique(device, image_builder); font_image->set_debug_name("GUI font image"); font_image_view = std::make_unique(*font_image, vk::ImageViewType::e2D); @@ -143,8 +144,7 @@ HPPGui::HPPGui(HPPVulkanSample &sample_, const vkb::Window &window, const vkb::s // Upload font data into the vulkan image memory { - vkb::core::HPPBuffer stage_buffer(device, upload_size, vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY, 0); - stage_buffer.update({font_data, font_data + upload_size}); + vkb::core::HPPBuffer stage_buffer = vkb::core::HPPBuffer::create_staging_buffer(device, upload_size, font_data); auto &command_buffer = device.get_command_pool().request_command_buffer(); @@ -225,10 +225,14 @@ HPPGui::HPPGui(HPPVulkanSample &sample_, const vkb::Window &window, const vkb::s if (explicit_update) { - vertex_buffer = std::make_unique(sample.get_render_context().get_device(), 1, vk::BufferUsageFlagBits::eVertexBuffer, VMA_MEMORY_USAGE_GPU_TO_CPU); + vkb::core::HPPBufferBuilder vertex_buffer_builder(1); + vertex_buffer_builder.with_usage(vk::BufferUsageFlagBits::eVertexBuffer).with_vma_usage(VMA_MEMORY_USAGE_GPU_TO_CPU); + vertex_buffer 
= std::make_unique(sample.get_render_context().get_device(), vertex_buffer_builder); vertex_buffer->set_debug_name("GUI vertex buffer"); - index_buffer = std::make_unique(sample.get_render_context().get_device(), 1, vk::BufferUsageFlagBits::eIndexBuffer, VMA_MEMORY_USAGE_GPU_TO_CPU); + vkb::core::HPPBufferBuilder index_buffer_builder(1); + index_buffer_builder.with_usage(vk::BufferUsageFlagBits::eIndexBuffer).with_vma_usage(VMA_MEMORY_USAGE_GPU_TO_CPU); + index_buffer = std::make_unique(sample.get_render_context().get_device(), index_buffer_builder); index_buffer->set_debug_name("GUI index buffer"); } } @@ -691,7 +695,9 @@ bool HPPGui::is_debug_view_active() const HPPGui::StatsView::StatsView(const vkb::stats::HPPStats *stats) { if (stats == nullptr) + { return; + } // Request graph data information for each stat and record it in graph_map const std::set &indices = stats->get_requested_stats(); @@ -1061,7 +1067,7 @@ bool HPPDrawer::checkbox(const std::string &caption, int32_t *value) if (res) { dirty = true; - }; + } return res; } @@ -1069,8 +1075,9 @@ bool HPPDrawer::radio_button(const char *caption, int32_t *selectedOption, const { bool res = ImGui::RadioButton(caption, selectedOption, elementOption); if (res) + { dirty = true; - + } return res; } @@ -1080,7 +1087,7 @@ bool HPPDrawer::input_float(const std::string &caption, float *value, float step if (res) { dirty = true; - }; + } return res; } diff --git a/framework/rendering/hpp_render_context.cpp b/framework/rendering/hpp_render_context.cpp index 8b72c1216b..ed44c32855 100644 --- a/framework/rendering/hpp_render_context.cpp +++ b/framework/rendering/hpp_render_context.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -70,13 +70,12 @@ void HPPRenderContext::prepare(size_t thread_count, vkb::rendering::HPPRenderTar { // Otherwise, create a single RenderFrame swapchain = nullptr; + // We can use any format here that we like + core::HPPImageBuilder builder(surface_extent); + builder.with_format(DEFAULT_VK_FORMAT); + builder.with_usage(vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc); - auto color_image = vkb::core::HPPImage{device, - vk::Extent3D{surface_extent.width, surface_extent.height, 1}, - DEFAULT_VK_FORMAT, // We can use any format here that we like - vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc, - VMA_MEMORY_USAGE_GPU_ONLY}; - + auto color_image = vkb::core::HPPImage{device, builder}; auto render_target = create_render_target_func(std::move(color_image)); frames.emplace_back(std::make_unique(device, std::move(render_target), thread_count)); } diff --git a/framework/rendering/hpp_render_context.h b/framework/rendering/hpp_render_context.h index 8fb312c24f..296a58bd07 100644 --- a/framework/rendering/hpp_render_context.h +++ b/framework/rendering/hpp_render_context.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -196,7 +196,7 @@ class HPPRenderContext /** * @brief Handles surface changes, only applicable if the render_context makes use of a swapchain */ - virtual bool handle_surface_changes(bool force_update = false); + bool handle_surface_changes(bool force_update = false); /** * @brief Returns the WSI acquire semaphore. 
Only to be used in very special circumstances. diff --git a/framework/rendering/hpp_render_target.cpp b/framework/rendering/hpp_render_target.cpp index 8791312d51..cbf2d20ec8 100644 --- a/framework/rendering/hpp_render_target.cpp +++ b/framework/rendering/hpp_render_target.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -26,10 +26,10 @@ namespace rendering const HPPRenderTarget::CreateFunc HPPRenderTarget::DEFAULT_CREATE_FUNC = [](core::HPPImage &&swapchain_image) -> std::unique_ptr { vk::Format depth_format = common::get_suitable_depth_format(swapchain_image.get_device().get_gpu().get_handle()); - core::HPPImage depth_image{swapchain_image.get_device(), swapchain_image.get_extent(), - depth_format, - vk::ImageUsageFlagBits::eDepthStencilAttachment | vk::ImageUsageFlagBits::eTransientAttachment, - VMA_MEMORY_USAGE_GPU_ONLY}; + core::HPPImageBuilder builder(swapchain_image.get_extent()); + builder.with_format(depth_format); + builder.with_usage(vk::ImageUsageFlagBits::eDepthStencilAttachment | vk::ImageUsageFlagBits::eTransientAttachment); + core::HPPImage depth_image{swapchain_image.get_device(), builder}; std::vector images; images.push_back(std::move(swapchain_image)); diff --git a/framework/rendering/render_context.h b/framework/rendering/render_context.h index 44798653d4..c06c3a8f24 100644 --- a/framework/rendering/render_context.h +++ b/framework/rendering/render_context.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Arm Limited and Contributors +/* Copyright (c) 2019-2024, Arm Limited and Contributors * * SPDX-License-Identifier: Apache-2.0 * @@ -217,7 +217,7 @@ class RenderContext /** * @brief Handles surface changes, only applicable if the render_context makes use of a swapchain */ - virtual bool handle_surface_changes(bool force_update = false); + bool handle_surface_changes(bool force_update = false); /** * @brief Returns the WSI acquire semaphore. Only to be used in very special circumstances. diff --git a/samples/api/compute_nbody/compute_nbody.cpp b/samples/api/compute_nbody/compute_nbody.cpp index 659b901e82..734f634ba7 100644 --- a/samples/api/compute_nbody/compute_nbody.cpp +++ b/samples/api/compute_nbody/compute_nbody.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -322,8 +322,7 @@ void ComputeNBody::prepare_storage_buffers() // Staging // SSBO won't be changed on the host after upload so copy to device local memory - vkb::core::Buffer staging_buffer{get_device(), storage_buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY}; - staging_buffer.update(particle_buffer.data(), storage_buffer_size); + vkb::core::Buffer staging_buffer = vkb::core::Buffer::create_staging_buffer(get_device(), particle_buffer); compute.storage_buffer = std::make_unique(get_device(), storage_buffer_size, diff --git a/samples/api/hpp_compute_nbody/hpp_compute_nbody.cpp b/samples/api/hpp_compute_nbody/hpp_compute_nbody.cpp index 6e1986afdb..6062635708 100644 --- a/samples/api/hpp_compute_nbody/hpp_compute_nbody.cpp +++ b/samples/api/hpp_compute_nbody/hpp_compute_nbody.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved. +/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -371,7 +371,7 @@ void HPPComputeNBody::initializeCamera() camera.type = vkb::CameraType::LookAt; // Note: Using reversed depth-buffer for increased precision, so Z-Near and Z-Far are flipped - camera.set_perspective(60.0f, (float) extent.width / (float) extent.height, 512.0f, 0.1f); + camera.set_perspective(60.0f, static_cast<float>(extent.width) / static_cast<float>(extent.height), 512.0f, 0.1f); camera.set_rotation(glm::vec3(-26.0f, 75.0f, 0.0f)); camera.set_translation(glm::vec3(0.0f, 0.0f, -14.0f)); camera.translation_speed = 2.5f; @@ -513,7 +513,7 @@ void HPPComputeNBody::prepare_compute_storage_buffers() // Initial particle positions std::vector<Particle> particle_buffer(compute.ubo.particle_count); - std::default_random_engine rnd_engine(lock_simulation_speed ? 0 : (unsigned) time(nullptr)); + std::default_random_engine rnd_engine(lock_simulation_speed ? 0 : static_cast<unsigned>(time(nullptr))); std::normal_distribution<float> rnd_distribution(0.0f, 1.0f); for (uint32_t i = 0; i < static_cast<uint32_t>(attractors.size()); i++) @@ -547,7 +547,7 @@ void HPPComputeNBody::prepare_compute_storage_buffers() } // Color gradient offset - particle.vel.w = (float) i * 1.0f / static_cast(attractors.size()); + particle.vel.w = static_cast<float>(i) * 1.0f / static_cast(attractors.size()); } } @@ -555,8 +555,7 @@ // Staging // SSBO won't be changed on the host after upload so copy to device local memory - vkb::core::HPPBuffer staging_buffer(*get_device(), storage_buffer_size, vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY); - staging_buffer.update(particle_buffer.data(), storage_buffer_size); + vkb::core::HPPBuffer staging_buffer = vkb::core::HPPBuffer::create_staging_buffer(*get_device(), particle_buffer); compute.storage_buffer = std::make_unique<vkb::core::HPPBuffer>(*get_device(), storage_buffer_size, diff --git a/samples/api/instancing/instancing.cpp b/samples/api/instancing/instancing.cpp index 7535f114f8..9e9e9d4e45 100644 --- a/samples/api/instancing/instancing.cpp +++ b/samples/api/instancing/instancing.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -402,8 +402,7 @@ void Instancing::prepare_instance_data() // On devices with separate memory types for host visible and device local memory this will result in better performance // On devices with unified memory types (DEVICE_LOCAL_BIT and HOST_VISIBLE_BIT supported at once) this isn't necessary and you could skip the staging - vkb::core::Buffer staging_buffer(get_device(), instance_buffer.size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_TO_GPU); - staging_buffer.update(instance_data); + vkb::core::Buffer staging_buffer = vkb::core::Buffer::create_staging_buffer(get_device(), instance_data); instance_buffer.buffer = std::make_unique<vkb::core::Buffer>(get_device(), instance_buffer.size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VMA_MEMORY_USAGE_GPU_ONLY); diff --git a/samples/extensions/conservative_rasterization/conservative_rasterization.cpp b/samples/extensions/conservative_rasterization/conservative_rasterization.cpp index 06d98e79a6..cfd757480f 100644 --- a/samples/extensions/conservative_rasterization/conservative_rasterization.cpp +++ b/samples/extensions/conservative_rasterization/conservative_rasterization.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -353,11
+353,8 @@ void ConservativeRasterization::load_assets() uint32_t index_buffer_size = triangle.index_count * sizeof(uint32_t); // Host visible source buffers (staging) - vkb::core::Buffer vertex_staging_buffer{get_device(), vertex_buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY}; - vertex_staging_buffer.update(vertex_buffer.data(), vertex_buffer_size); - - vkb::core::Buffer index_staging_buffer{get_device(), index_buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY}; - index_staging_buffer.update(index_buffer.data(), index_buffer_size); + vkb::core::Buffer vertex_staging_buffer = vkb::core::Buffer::create_staging_buffer(get_device(), vertex_buffer); + vkb::core::Buffer index_staging_buffer = vkb::core::Buffer::create_staging_buffer(get_device(), index_buffer); // Device local destination buffers triangle.vertices = std::make_unique(get_device(), diff --git a/samples/extensions/ray_tracing_basic/ray_tracing_basic.cpp b/samples/extensions/ray_tracing_basic/ray_tracing_basic.cpp index ab048f86e5..6c688f8a56 100644 --- a/samples/extensions/ray_tracing_basic/ray_tracing_basic.cpp +++ b/samples/extensions/ray_tracing_basic/ray_tracing_basic.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2019-2023, Sascha Willems +/* Copyright (c) 2019-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -470,13 +470,19 @@ void RaytracingBasic::create_shader_binding_tables() const uint32_t group_count = static_cast(shader_groups.size()); const uint32_t sbt_size = group_count * handle_size_aligned; const VkBufferUsageFlags sbt_buffer_usage_flags = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT; - const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // Raygen // Create binding table buffers for each shader type - raygen_shader_binding_table = std::make_unique(get_device(), handle_size, sbt_buffer_usage_flags, sbt_memory_usage, 0); - miss_shader_binding_table = std::make_unique(get_device(), handle_size, sbt_buffer_usage_flags, sbt_memory_usage, 0); - hit_shader_binding_table = std::make_unique(get_device(), handle_size, sbt_buffer_usage_flags, sbt_memory_usage, 0); + vkb::core::BufferBuilder builder{handle_size}; + builder + .with_usage(sbt_buffer_usage_flags) + .with_vma_usage(sbt_memory_usage) + .with_vma_flags(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT); + + raygen_shader_binding_table = std::make_unique(get_device(), builder); + miss_shader_binding_table = std::make_unique(get_device(), builder); + hit_shader_binding_table = std::make_unique(get_device(), builder); // Copy the pipeline's shader handles into a host buffer std::vector shader_handle_storage(sbt_size); diff --git a/samples/extensions/ray_tracing_extended/ray_tracing_extended.cpp b/samples/extensions/ray_tracing_extended/ray_tracing_extended.cpp index ef87207d7e..4d4f655d54 100644 --- a/samples/extensions/ray_tracing_extended/ray_tracing_extended.cpp +++ b/samples/extensions/ray_tracing_extended/ray_tracing_extended.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023 Holochip Corporation +/* Copyright (c) 2021-2024 Holochip Corporation * * SPDX-License-Identifier: Apache-2.0 * @@ -43,7 +43,8 @@ struct QuickTimer { if (print_on_exit) { - using namespace std::chrono; + using std::chrono::duration_cast; + using std::chrono::microseconds; const auto dur = duration_cast(clock::now() - start).count(); LOGI(fmt::format("{:s} 
duration: {:f} ms", name, dur / 1000.)) } @@ -534,7 +535,12 @@ void RaytracingExtended::create_bottom_level_acceleration_structure(bool is_upda VkTransformMatrixKHR RaytracingExtended::calculate_rotation(glm::vec3 pt, float scale, bool freeze_z) { - using namespace glm; + using glm::abs; + using glm::cross; + using glm::dot; + using glm::normalize; + using glm::vec3; + auto normal = normalize(pt + camera.position); if (freeze_z) { @@ -851,7 +857,7 @@ void RaytracingExtended::create_shader_binding_tables() auto group_count = static_cast(shader_groups.size()); const uint32_t sbt_size = group_count * handle_size_aligned; const VkBufferUsageFlags sbt_buffer_usage_flags = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT; - const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // Raygen // Create binding table buffers for each shader type diff --git a/samples/extensions/ray_tracing_reflection/ray_tracing_reflection.cpp b/samples/extensions/ray_tracing_reflection/ray_tracing_reflection.cpp index eb96bffbd9..408f6f7bf4 100644 --- a/samples/extensions/ray_tracing_reflection/ray_tracing_reflection.cpp +++ b/samples/extensions/ray_tracing_reflection/ray_tracing_reflection.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2021-2024, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. * - * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION + * SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION * SPDX-License-Identifier: Apache-2.0 */ @@ -571,7 +571,7 @@ void RaytracingReflection::create_shader_binding_tables() const uint32_t handle_size_aligned = aligned_size(handle_size, handle_alignment); const VkBufferUsageFlags sbt_buffer_usage_flags = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT; - const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_CPU_TO_GPU; + const VmaMemoryUsage sbt_memory_usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // Create binding table buffers for each shader type raygen_shader_binding_table = std::make_unique(get_device(), handle_size_aligned * rgen_index.size(), sbt_buffer_usage_flags, sbt_memory_usage, 0); diff --git a/samples/extensions/synchronization_2/synchronization_2.cpp b/samples/extensions/synchronization_2/synchronization_2.cpp index 3980c2cff4..5779e3c42d 100644 --- a/samples/extensions/synchronization_2/synchronization_2.cpp +++ b/samples/extensions/synchronization_2/synchronization_2.cpp @@ -1,4 +1,4 @@ -/* Copyright (c) 2021-2023, Sascha Willems +/* Copyright (c) 2021-2024, Sascha Willems * * SPDX-License-Identifier: Apache-2.0 * @@ -307,8 +307,7 @@ void Synchronization2::prepare_storage_buffers() // Staging // SSBO won't be changed on the host after upload so copy to device local memory - vkb::core::Buffer staging_buffer{get_device(), storage_buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY}; - staging_buffer.update(particle_buffer.data(), static_cast(storage_buffer_size)); + vkb::core::Buffer staging_buffer = vkb::core::Buffer::create_staging_buffer(get_device(), 
particle_buffer); compute.storage_buffer = std::make_unique<vkb::core::Buffer>(get_device(), storage_buffer_size, diff --git a/third_party/CMakeLists.txt b/third_party/CMakeLists.txt index 1f35a2c307..c7d1763536 100644 --- a/third_party/CMakeLists.txt +++ b/third_party/CMakeLists.txt @@ -378,7 +378,7 @@ set(CLI11_CUDA_TESTS OFF) set(CLI11_CLANG_TIDY OFF) add_subdirectory(cli11) if (TARGET CLI11) - set_property(TARGET CLI11 PROPERTY FOLDER "ThirdParty") + #set_property(TARGET CLI11 PROPERTY FOLDER "ThirdParty") endif() # OpenCL
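
The staging-buffer hunks above all converge on the new create_staging_buffer helper, and the remaining allocations move from the legacy VMA_MEMORY_USAGE_CPU_ONLY / VMA_MEMORY_USAGE_CPU_TO_GPU enums to VMA_MEMORY_USAGE_AUTO. A minimal sketch of the allocation pattern this implies, written against the plain VMA 3.x API; the function name create_host_staging_buffer and the reduced error handling are illustrative assumptions, not the framework's actual implementation:

#include <vk_mem_alloc.h>

#include <cstring>

// Allocate a host-visible transfer-source buffer and copy `size` bytes into it.
// With VMA_MEMORY_USAGE_AUTO, host visibility is no longer implied by the usage
// enum and must be requested explicitly via a HOST_ACCESS flag.
bool create_host_staging_buffer(VmaAllocator   allocator,
                                const void    *data,
                                VkDeviceSize   size,
                                VkBuffer      &buffer,
                                VmaAllocation &allocation)
{
    VkBufferCreateInfo buffer_info{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
    buffer_info.size  = size;
    buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo alloc_create_info{};
    alloc_create_info.usage = VMA_MEMORY_USAGE_AUTO;
    // SEQUENTIAL_WRITE is enough for a fill-once staging buffer; MAPPED keeps the
    // allocation persistently mapped so the copy below needs no vmaMapMemory call.
    alloc_create_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
                              VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocationInfo allocation_info{};
    if (vmaCreateBuffer(allocator, &buffer_info, &alloc_create_info, &buffer, &allocation, &allocation_info) != VK_SUCCESS)
    {
        return false;
    }

    std::memcpy(allocation_info.pMappedData, data, static_cast<size_t>(size));
    return true;
}

This is also why the ray_tracing_basic hunk pairs VMA_MEMORY_USAGE_AUTO_PREFER_HOST with with_vma_flags(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT): without a HOST_ACCESS flag, an AUTO allocation is not guaranteed to be mappable.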
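
The ImageBuilder added to core/image.h (and the HPPImageBuilder used in hpp_gui.cpp, hpp_render_context.cpp and hpp_render_target.cpp) replaces the long positional Image constructor with chained with_* setters. A short usage sketch, assuming the framework headers are on the include path and a valid vkb::Device; the helper function name is hypothetical and only builder methods that appear in this diff are used:

#include "core/image.h"

// Build a color attachment image; fields that are not set keep the builder
// defaults (2D image, one mip level, one array layer, optimal tiling).
vkb::core::Image create_color_attachment(vkb::Device &device, const VkExtent3D &extent)
{
    vkb::core::ImageBuilder builder(extent);
    builder.with_format(VK_FORMAT_R8G8B8A8_UNORM)
        .with_usage(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
        .with_sample_count(VK_SAMPLE_COUNT_1_BIT)
        .with_vma_usage(VMA_MEMORY_USAGE_AUTO);

    return vkb::core::Image(device, builder);
}

Keeping the whole VkImageCreateInfo inside the builder is also what lets Image serve get_tiling(), get_usage() and the other accessors straight from create_info instead of duplicating each field as a separate member.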
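
vulkan_resource.h now derives the VkObjectType from the handle type itself through function-template specializations, instead of carrying an OBJECT_TYPE template parameter on every VulkanResource instantiation. A self-contained illustration of that dispatch pattern, with simplified names outside the vkb namespace; the fallback here returns VK_OBJECT_TYPE_UNKNOWN rather than throwing as the framework's generic overload does:

#include <vulkan/vulkan.h>

#include <cstdio>

// Primary template: any handle type without a specialization is "unknown".
template <typename HandleType>
constexpr VkObjectType object_type_of(const HandleType &)
{
    return VK_OBJECT_TYPE_UNKNOWN;
}

// Explicit specializations map concrete handle types to their object type;
// the compiler picks the right one from the argument, so a caller such as a
// set_debug_name() helper no longer needs an OBJECT_TYPE template parameter.
template <>
constexpr VkObjectType object_type_of(const VkBuffer &)
{
    return VK_OBJECT_TYPE_BUFFER;
}

template <>
constexpr VkObjectType object_type_of(const VkCommandBuffer &)
{
    return VK_OBJECT_TYPE_COMMAND_BUFFER;
}

int main()
{
    VkBuffer        buffer = VK_NULL_HANDLE;
    VkCommandBuffer cmd    = VK_NULL_HANDLE;
    std::printf("%d %d\n",
                static_cast<int>(object_type_of(buffer)),        // 9 (VK_OBJECT_TYPE_BUFFER)
                static_cast<int>(object_type_of(cmd)));          // 6 (VK_OBJECT_TYPE_COMMAND_BUFFER)
    return 0;
}

One caveat: on builds where all non-dispatchable handles collapse to uint64_t typedefs, specializations for two non-dispatchable types collide, so the mapping only works cleanly when handle types are distinct.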