/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <[email protected]>
 * Author: Tobin Ehlis <[email protected]>
 * Author: Chris Forbes <[email protected]>
 * Author: Mark Lobodzinski <[email protected]>
 */
#ifndef NOEXCEPT
// Check for noexcept support
#if defined(__clang__)
#if __has_feature(cxx_noexcept)
#define HAS_NOEXCEPT
#endif
#else
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46
#define HAS_NOEXCEPT
#else
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS
#define HAS_NOEXCEPT
#endif
#endif
#endif
#ifdef HAS_NOEXCEPT
#define NOEXCEPT noexcept
#else
#define NOEXCEPT
#endif
#endif
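// Illustrative only (not part of this header's interface): NOEXCEPT is meant to be used
// wherever C++11 'noexcept' would appear, and degrades to nothing on toolchains without
// support, e.g.:
//
//     struct Example {
//         Example(Example &&other) NOEXCEPT;  // expands to 'noexcept' when HAS_NOEXCEPT is defined
//     };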
#pragma once
#include "core_validation_error_enums.h"
#include "vk_validation_error_messages.h"
#include "core_validation_types.h"
#include "descriptor_sets.h"
#include "vk_layer_logging.h"
#include "vulkan/vk_layer.h"
#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <list>
#include <deque>
/*
 * MTMTODO : Update this comment
 * Data Structure overview
 * There are 4 global STL maps:
 *  cbMap -- map of command buffer (CB) objects to MT_CB_INFO structures
 *    Each MT_CB_INFO struct has an STL list container with
 *    memory objects that are referenced by this CB
 *  memObjMap -- map of Memory Objects to MT_MEM_OBJ_INFO structures
 *    Each MT_MEM_OBJ_INFO has two STL list containers with:
 *      -- all CBs referencing this mem obj
 *      -- all VK Objects that are bound to this memory
 *  objectMap -- map of objects to MT_OBJ_INFO structures
 *
 * Algorithm overview
 * These are the primary events that should happen related to different objects
 * 1. Command buffers
 *    CREATION - Add object, structure to map
 *    CMD BIND - If mem associated, add mem reference to list container
 *    DESTROY  - Remove from map, decrement (and report) mem references
 * 2. Mem Objects
 *    CREATION - Add object, structure to map
 *    OBJ BIND - Add obj structure to list container for that mem node
 *    CMD BIND - If mem-related, add CB structure to list container for that mem node
 *    DESTROY  - Flag any remaining refs as errors and remove from map
 * 3. Generic Objects
 *    MEM BIND - Destroy any previous binding, add obj node w/ ref to map, add obj ref to list container for that mem node
 *    DESTROY  - If mem bound, remove reference from list container for that memInfo, remove object ref from map
 */
// TODO : Is there a way to track when Cmd Buffer finishes & remove mem references at that point?
// TODO : Could potentially store a list of freed mem allocs to flag when they're incorrectly used
struct GENERIC_HEADER {
    VkStructureType sType;
    const void *pNext;
};
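// Illustrative sketch only: GENERIC_HEADER mirrors the sType/pNext prefix that Vulkan
// structures share, so a pNext chain can be walked generically without knowing each
// extension structure's concrete type, e.g.:
//
//     for (auto *node = reinterpret_cast<const GENERIC_HEADER *>(create_info->pNext); node;
//          node = reinterpret_cast<const GENERIC_HEADER *>(node->pNext)) {
//         // inspect node->sType here
//     }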
enum FENCE_STATE { FENCE_UNSIGNALED, FENCE_INFLIGHT, FENCE_RETIRED };
class FENCE_NODE {
  public:
    VkFence fence;
    VkFenceCreateInfo createInfo;
    std::pair<VkQueue, uint64_t> signaler;
    FENCE_STATE state;

    // Default constructor
    FENCE_NODE() : state(FENCE_UNSIGNALED) {}
};
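// Hypothetical helper, shown only to illustrate the FENCE_STATE lifecycle tracked above
// (UNSIGNALED -> INFLIGHT on submit, INFLIGHT -> RETIRED once the queue work completes);
// it is not part of this layer's API:
//
//     static inline bool FenceCanBeReset(const FENCE_NODE &node) {
//         // Resetting a fence that is still in flight would be an application error.
//         return node.state != FENCE_INFLIGHT;
//     }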
class SEMAPHORE_NODE : public BASE_NODE {
  public:
    std::pair<VkQueue, uint64_t> signaler;
    bool signaled;
};
class EVENT_STATE : public BASE_NODE {
  public:
    int write_in_use;
    bool needsSignaled;
    VkPipelineStageFlags stageMask;
};
class QUEUE_STATE {
  public:
    VkQueue queue;
    uint32_t queueFamilyIndex;
    std::unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap;
    std::unordered_map<QueryObject, bool> queryToStateMap;  // false is unavailable, true is available
    uint64_t seq;
    std::deque<CB_SUBMISSION> submissions;
};
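// Hypothetical helper, not part of the layer: the 'submissions' deque appears to hold the
// queue's not-yet-retired CB_SUBMISSION entries (with 'seq' providing retirement ordering),
// so an empty deque would mean nothing submitted to this queue is still pending, e.g.:
//
//     static inline bool QueueHasPendingSubmissions(const QUEUE_STATE &queue_state) {
//         return !queue_state.submissions.empty();
//     }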
class QUERY_POOL_NODE : public BASE_NODE {
  public:
    VkQueryPoolCreateInfo createInfo;
};
// State carried over from the Device Limits layer
enum CALL_STATE {
    UNCALLED,       // Function has not been called
    QUERY_COUNT,    // Function called once to query a count
    QUERY_DETAILS,  // Function called w/ a count to query details
};
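// Illustrative only: CALL_STATE mirrors Vulkan's two-call enumeration idiom. For example,
// for vkGetPhysicalDeviceQueueFamilyProperties the tracked state would advance as follows:
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);       // UNCALLED -> QUERY_COUNT
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // QUERY_COUNT -> QUERY_DETAILS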
struct PHYSICAL_DEVICE_STATE {
    // Track the call state and array sizes for various query functions
    CALL_STATE vkGetPhysicalDeviceQueueFamilyPropertiesState = UNCALLED;
    uint32_t queueFamilyPropertiesCount = 0;
    CALL_STATE vkGetPhysicalDeviceLayerPropertiesState = UNCALLED;
    CALL_STATE vkGetPhysicalDeviceExtensionPropertiesState = UNCALLED;
    CALL_STATE vkGetPhysicalDeviceFeaturesState = UNCALLED;
    CALL_STATE vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = UNCALLED;
    CALL_STATE vkGetPhysicalDeviceSurfacePresentModesKHRState = UNCALLED;
    CALL_STATE vkGetPhysicalDeviceSurfaceFormatsKHRState = UNCALLED;
    VkPhysicalDeviceFeatures features = {};
    VkPhysicalDevice phys_device = VK_NULL_HANDLE;
    std::vector<VkQueueFamilyProperties> queue_family_properties;
    VkSurfaceCapabilitiesKHR surfaceCapabilities = {};
    std::vector<VkPresentModeKHR> present_modes;
    std::vector<VkSurfaceFormatKHR> surface_formats;
};
struct GpuQueue {
    VkPhysicalDevice gpu;
    uint32_t queue_family_index;
};

inline bool operator==(GpuQueue const &lhs, GpuQueue const &rhs) {
    return (lhs.gpu == rhs.gpu && lhs.queue_family_index == rhs.queue_family_index);
}
namespace std {
template <>
struct hash<GpuQueue> {
    size_t operator()(GpuQueue gq) const throw() {
        return hash<uint64_t>()((uint64_t)(gq.gpu)) ^ hash<uint32_t>()(gq.queue_family_index);
    }
};
}
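// Illustrative only: the specialization above is what lets GpuQueue key the standard
// unordered containers, as SURFACE_STATE::gpu_queue_support does below, e.g.:
//
//     std::unordered_map<GpuQueue, bool> support;
//     support[{physical_device, family_index}] = surface_supported;  // hypothetical names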
struct SURFACE_STATE {
    VkSurfaceKHR surface = VK_NULL_HANDLE;
    SWAPCHAIN_NODE *swapchain = nullptr;
    SWAPCHAIN_NODE *old_swapchain = nullptr;
    std::unordered_map<GpuQueue, bool> gpu_queue_support;

    SURFACE_STATE() {}
    SURFACE_STATE(VkSurfaceKHR surface) : surface(surface) {}
};
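// Illustrative only: 'swapchain' / 'old_swapchain' presumably mirror the application-side
// handoff that occurs when a swapchain is recreated for the same surface and the previous
// handle is passed as oldSwapchain, e.g.:
//
//     VkSwapchainCreateInfoKHR ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
//     ci.surface = surface;
//     ci.oldSwapchain = previous_swapchain;  // the retired chain may still have images in flight
//     // ... remaining fields elided ...
//     vkCreateSwapchainKHR(device, &ci, nullptr, &new_swapchain);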