/* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
* Copyright (C) 2015-2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"
#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) \
{ \
printf(__VA_ARGS__); \
printf("\n"); \
}
#endif
// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"
namespace core_validation {
using std::unordered_map;
using std::unordered_set;
using std::unique_ptr;
using std::vector;
using std::string;
using std::stringstream;
using std::max;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;
// fwd decls
struct shader_module;
struct instance_layer_data {
VkInstance instance = VK_NULL_HANDLE;
debug_report_data *report_data = nullptr;
std::vector<VkDebugReportCallbackEXT> logging_callback;
VkLayerInstanceDispatchTable dispatch_table;
CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
uint32_t physical_devices_count = 0;
CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
uint32_t physical_device_groups_count = 0;
CHECK_DISABLED disabled = {};
unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
bool surfaceExtensionEnabled = false;
bool displayExtensionEnabled = false;
bool androidSurfaceExtensionEnabled = false;
bool mirSurfaceExtensionEnabled = false;
bool waylandSurfaceExtensionEnabled = false;
bool win32SurfaceExtensionEnabled = false;
bool xcbSurfaceExtensionEnabled = false;
bool xlibSurfaceExtensionEnabled = false;
};
struct layer_data {
debug_report_data *report_data = nullptr;
VkLayerDispatchTable dispatch_table;
devExts device_extensions = {};
unordered_set<VkQueue> queues; // All queues under given device
// Global set of all cmdBuffers that are inFlight on this device
unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
// Layer specific data
unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
unordered_map<VkFence, FENCE_NODE> fenceMap;
unordered_map<VkQueue, QUEUE_STATE> queueMap;
unordered_map<VkEvent, EVENT_STATE> eventMap;
unordered_map<QueryObject, bool> queryToStateMap;
unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
VkDevice device = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
instance_layer_data *instance_data = nullptr; // from device to enclosing instance
VkPhysicalDeviceFeatures enabled_features = {};
// Device specific data
PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
VkPhysicalDeviceProperties phys_dev_props = {};
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
static const VkLayerProperties global_layer = {
"VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};
template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
bool foundLayer = false;
for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
foundLayer = true;
}
// This has to be logged to console as we don't have a callback at this point.
if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
}
}
}
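// Illustrative sketch (not part of the layer; the surrounding entry point is assumed):
// ValidateLayerOrdering is intended to be called from the create-time entry points with the
// app-supplied create info. VkInstanceCreateInfo and VkDeviceCreateInfo both expose
// enabledLayerCount / ppEnabledLayerNames, which is why the function is a template.
//
//     // e.g., inside a CreateInstance/CreateDevice wrapper:
//     ValidateLayerOrdering(*pCreateInfo);  // warns via LOGCONSOLE if ordering is wrong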
// Code imported from shader_checker
static void build_def_index(shader_module *);
// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
std::vector<uint32_t>::const_iterator zero;
std::vector<uint32_t>::const_iterator it;
uint32_t len() {
auto result = *it >> 16;
assert(result > 0);
return result;
}
uint32_t opcode() { return *it & 0x0ffffu; }
uint32_t const &word(unsigned n) {
assert(n < len());
return it[n];
}
uint32_t offset() { return (uint32_t)(it - zero); }
spirv_inst_iter() {}
spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
bool operator==(spirv_inst_iter const &other) { return it == other.it; }
bool operator!=(spirv_inst_iter const &other) { return it != other.it; }
spirv_inst_iter operator++(int) { // x++
spirv_inst_iter ii = *this;
it += len();
return ii;
}
spirv_inst_iter operator++() { // ++x;
it += len();
return *this;
}
// The iterator and the value are the same thing.
spirv_inst_iter &operator*() { return *this; }
spirv_inst_iter const &operator*() const { return *this; }
};
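// Illustrative sketch of the iterator in use (assumes 'words' holds a valid SPIR-V module;
// instructions begin at word 5, after the 5-word header):
//
//     spirv_inst_iter it(words.begin(), words.begin() + 5);
//     spirv_inst_iter end_it(words.begin(), words.end());
//     while (it != end_it) {
//         uint32_t op = it.opcode();  // low 16 bits of the instruction's first word
//         ++it;                       // advances by len() words to the next instruction
//     }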
struct shader_module {
// The spirv image itself
vector<uint32_t> words;
// A mapping of <id> to the first word of its def. This is useful because walking type
// trees, constant expressions, etc. requires jumping all over the instruction stream.
unordered_map<unsigned, unsigned> def_index;
bool has_valid_spirv;
shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
: words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
def_index(),
has_valid_spirv(true) {
build_def_index(this);
}
shader_module() : has_valid_spirv(false) {}
// Expose begin() / end() to enable range-based for
spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } // First insn
spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); } // Just past last insn
// Given an offset into the module, produce an iterator there.
spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
// Gets an iterator to the definition of an id
spirv_inst_iter get_def(unsigned id) const {
auto it = def_index.find(id);
if (it == def_index.end()) {
return end();
}
return at(it->second);
}
};
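// Illustrative sketch (not called by the layer): get_def() lets type-walking code jump from
// an <id> operand straight to its defining instruction. Assuming 'module' is a valid
// shader_module and 'ptr_id' names an OpTypePointer:
//
//     auto ptr_def = module.get_def(ptr_id);
//     if (ptr_def != module.end() && ptr_def.opcode() == spv::OpTypePointer) {
//         auto pointee_def = module.get_def(ptr_def.word(3));  // word 3 = pointee type <id>
//     }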
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
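// Illustrative sketch: the state-lookup helpers below do no locking themselves, so callers in
// the dispatchable entry points are expected to hold global_lock across lookup and use:
//
//     std::unique_lock<std::mutex> lock(global_lock);
//     IMAGE_STATE *image_state = GetImageState(dev_data, image);
//     // ... inspect/update tracked state ...
//     lock.unlock();  // release before calling down the dispatch chain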
// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
auto iv_it = dev_data->imageViewMap.find(image_view);
if (iv_it == dev_data->imageViewMap.end()) {
return nullptr;
}
return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
auto sampler_it = dev_data->samplerMap.find(sampler);
if (sampler_it == dev_data->samplerMap.end()) {
return nullptr;
}
return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->imageMap.find(image);
if (img_it == dev_data->imageMap.end()) {
return nullptr;
}
return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
auto buff_it = dev_data->bufferMap.find(buffer);
if (buff_it == dev_data->bufferMap.end()) {
return nullptr;
}
return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
return nullptr;
}
return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR GetSwapchainFromImage(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
return VK_NULL_HANDLE;
}
return img_it->second;
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
auto bv_it = dev_data->bufferViewMap.find(buffer_view);
if (bv_it == dev_data->bufferViewMap.end()) {
return nullptr;
}
return bv_it->second.get();
}
FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
auto it = dev_data->fenceMap.find(fence);
if (it == dev_data->fenceMap.end()) {
return nullptr;
}
return &it->second;
}
EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
auto it = dev_data->eventMap.find(event);
if (it == dev_data->eventMap.end()) {
return nullptr;
}
return &it->second;
}
QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
auto it = dev_data->queryPoolMap.find(query_pool);
if (it == dev_data->queryPoolMap.end()) {
return nullptr;
}
return &it->second;
}
QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
auto it = dev_data->queueMap.find(queue);
if (it == dev_data->queueMap.end()) {
return nullptr;
}
return &it->second;
}
SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
auto it = dev_data->semaphoreMap.find(semaphore);
if (it == dev_data->semaphoreMap.end()) {
return nullptr;
}
return &it->second;
}
COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
auto it = dev_data->commandPoolMap.find(pool);
if (it == dev_data->commandPoolMap.end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
auto it = instance_data->physical_device_map.find(phys);
if (it == instance_data->physical_device_map.end()) {
return nullptr;
}
return &it->second;
}
SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
auto it = instance_data->surface_map.find(surface);
if (it == instance_data->surface_map.end()) {
return nullptr;
}
return &it->second;
}
// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
return GetImageState(dev_data, VkImage(handle));
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
return GetBufferState(dev_data, VkBuffer(handle));
default:
break;
}
return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
// Return ptr to the DEVICE_MEM_INFO for the given mem object, or NULL if not found
// Calls to this function should be made while holding global_lock
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
auto mem_it = dev_data->memObjMap.find(mem);
if (mem_it == dev_data->memObjMap.end()) {
return NULL;
}
return mem_it->second.get();
}
static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}
// Helper function to print lowercase string of object type
// TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
return "image";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
return "buffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
return "image view";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
return "buffer view";
case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
return "swapchain";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
return "descriptor set";
case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
return "framebuffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
return "event";
case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
return "query pool";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
return "descriptor pool";
case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
return "command pool";
case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
return "pipeline";
case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
return "sampler";
case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
return "renderpass";
case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
return "device memory";
case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
return "semaphore";
default:
return "unknown";
}
}
// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
VkDebugReportObjectTypeEXT type, const char *functionName) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
if (!mem_info->bound_ranges[bound_object_handle].valid) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
"%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
", please fill the memory before using.",
functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
}
}
return false;
}
// For given image_state
// If mem is special swapchain key, then verify that image_state valid member is true
// Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
if (!image_state->valid) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
"%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
functionName, reinterpret_cast<uint64_t &>(image_state->image));
}
} else {
return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
}
return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
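// Illustrative sketch (hypothetical call site): commands that read a resource validate its
// contents at record time and OR the result into the skip value:
//
//     skip |= ValidateBufferMemoryIsValid(dev_data, src_buffer_state, "vkCmdCopyBuffer()");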
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->bound_ranges[handle].valid = valid;
}
}
// For given image node
// If mem is special swapchain key, then set entire image_state to valid param value
// Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
image_state->valid = valid;
} else {
SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
}
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
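// Illustrative sketch (hypothetical, simplified): a command that writes a resource marks it
// valid once recording succeeds, so later reads pass the Validate*MemoryIsValid checks above:
//
//     if (!skip) {
//         SetBufferMemoryValid(dev_data, dst_buffer_state, true);  // destination now has defined contents
//     }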
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
const char *apiName) {
bool skip_call = false;
// Skip validation if this image was created through WSI
if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem);
if (pMemInfo) {
// Now update CBInfo's Mem reference list
GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, cb);
pMemInfo->cb_bindings.insert(cb_node);
// TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
if (cb_node) {
cb_node->memObjs.insert(mem);
}
}
}
return skip_call;
}
// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
sampler_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert(
{reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}
// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
// Skip validation if this image was created through WSI
if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
for (auto mem_binding : image_state->GetBoundMemory()) {
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
if (pMemInfo) {
pMemInfo->cb_bindings.insert(cb_node);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(mem_binding);
}
}
// Now update cb binding for image
cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
image_state->cb_bindings.insert(cb_node);
}
}
// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
// First add bindings for imageView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert(
{reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
auto image_state = GetImageState(dev_data, view_state->create_info.image);
// Add bindings for image within imageView
if (image_state) {
AddCommandBufferBindingImage(dev_data, cb_node, image_state);
}
}
// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
// First update CB binding in MemObj mini CB list
for (auto mem_binding : buffer_state->GetBoundMemory()) {
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
if (pMemInfo) {
pMemInfo->cb_bindings.insert(cb_node);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(mem_binding);
}
}
// Now update cb binding for buffer
cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
buffer_state->cb_bindings.insert(cb_node);
}
// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
// First add bindings for bufferView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert(
{reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
// Add bindings for buffer within bufferView
if (buffer_state) {
AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
}
}
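// Illustrative sketch (hypothetical call site): these binding helpers run when a command
// buffer records a use of a resource, so teardown can find every CB that references an
// object and vice versa. E.g., recording a vertex-buffer bind:
//
//     auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
//     if (buffer_state) {
//         AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
//     }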
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
if (cb_node) {
if (cb_node->memObjs.size() > 0) {
for (auto mem : cb_node->memObjs) {
DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
if (pInfo) {
pInfo->cb_bindings.erase(cb_node);
}
}
cb_node->memObjs.clear();
}
cb_node->validate_functions.clear();
}
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
clear_cmd_buf_and_mem_references(dev_data, GetCBNode(dev_data, cb));
}
// Clear a single object binding from the given memory object's list. Returns the skip value (currently always false).
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list
if (mem_info) {
mem_info->obj_bindings.erase({handle, type});
}
return false;
}
// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
bool skip = false;
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
if (mem_binding) {
if (!mem_binding->sparse) {
skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
} else { // Sparse, clear all bindings
for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
}
}
}
return skip;
}
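// Illustrative sketch (hypothetical, simplified): ClearMemoryObjectBindings is the
// destroy-time counterpart of SetMemBinding; a vkDestroyBuffer wrapper would drop the
// reverse references before erasing the buffer's own state:
//
//     ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer),
//                               VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
//     dev_data->bufferMap.erase(buffer);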
// For the given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
bool result = false;
if (VK_NULL_HANDLE == mem) {
result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
__LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
" used with no memory bound. Memory should be bound by calling "
"vkBind%sMemory(). %s",
api_name, type_name, handle, type_name, validation_error_map[error_code]);
} else if (MEMORY_UNBOUND == mem) {
result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
__LINE__, error_code, "MEM", "%s: Vk%s object 0x%" PRIxLEAST64
" used with no memory bound and previously bound memory was freed. "
"Memory must not be freed prior to this operation. %s",
api_name, type_name, handle, validation_error_map[error_code]);
}
return result;
}
// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
UNIQUE_VALIDATION_ERROR_CODE error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
}
return result;
}
// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
UNIQUE_VALIDATION_ERROR_CODE error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
}
return result;
}
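// Illustrative sketch (hypothetical call site): the bound-at-all checks above are the standard
// preamble for any command that consumes a resource, with the VU code supplied per call site:
//
//     skip |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()",
//                                          error_code);  // the call site's VALIDATION_ERROR_* value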
// SetMemBinding is used to establish an immutable, non-sparse binding between a single image/buffer object and a memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
const char *apiName) {
if (mem != VK_NULL_HANDLE) {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
assert(mem_binding);
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->obj_bindings.insert({handle, type});
// For image objects, make sure default memory state is correctly set
// TODO : What's the best/correct way to handle this?
if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
auto const image_state = GetImageState(dev_data, VkImage(handle));
if (image_state) {
VkImageCreateInfo ici = image_state->createInfo;
if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
}
mem_binding->binding.mem = mem;
}
}
}
// Valid usage checks for a call to SetMemBinding().
// Verify the given object exists in the global object map, then report a validation error if:
//  - the object was created with sparse memory flags (it must use the sparse binding path),
//  - a previous binding already exists (memory bindings are immutable in Vulkan), or
//  - the previously bound memory has since been freed.
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
const char *apiName) {
bool skip_call = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
assert(mem_binding);
if (mem_binding->sparse) {
UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_00804;
const char *handle_type = "IMAGE";
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = VALIDATION_ERROR_00792;
handle_type = "BUFFER";
} else {
assert(strcmp(apiName, "vkBindImageMemory()") == 0);
}
skip_call |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, error_code, "MEM",
"In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
apiName, reinterpret_cast<uint64_t &>(mem), handle, handle_type, validation_error_map[error_code]);
}
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
if (prev_binding) {
UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_00803;
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = VALIDATION_ERROR_00791;
} else {
assert(strcmp(apiName, "vkBindImageMemory()") == 0);
}
skip_call |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, error_code, "MEM",
"In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
") which has already been bound to mem object 0x%" PRIxLEAST64 ". %s",
apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem),
validation_error_map[error_code]);
} else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
skip_call |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
"In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, reinterpret_cast<uint64_t &>(mem), handle);
}
}
}
return skip_call;
}
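// Illustrative sketch (hypothetical, simplified): ValidateSetMemBinding/SetMemBinding follow
// the layer's validate-then-commit pattern: checks run under the lock, the driver is called
// only if nothing is skipped, and tracked state is updated afterward:
//
//     std::unique_lock<std::mutex> lock(global_lock);
//     bool skip = ValidateSetMemBinding(dev_data, mem, image_handle,
//                                       VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory()");
//     lock.unlock();
//     if (!skip) {
//         result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
//         lock.lock();
//         SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
//                       "vkBindImageMemory()");
//         lock.unlock();
//     }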
// For the NULL mem case, clear any previous binding; otherwise:
// Make sure the given object is in its object map,
// add a reference from objectInfo to memoryInfo, and
// record the new sparse binding on the object's binding info.
// Return the skip value (currently always false).
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
const char *apiName) {
bool skip_call = false;
// Handle NULL case separately, just clear previous binding & decrement reference
if (binding.mem == VK_NULL_HANDLE) {
// TODO : This should cause the range of the resource to be unbound according to spec
} else {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
assert(mem_binding);
assert(mem_binding->sparse);
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
if (mem_info) {
mem_info->obj_bindings.insert({handle, type});
// Need to set mem binding for this object
mem_binding->sparse_bindings.insert(binding);
}
}
return skip_call;
}
// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
switch (cmd) {
case CMD_BINDPIPELINE:
return "CMD_BINDPIPELINE";
case CMD_BINDPIPELINEDELTA:
return "CMD_BINDPIPELINEDELTA";
case CMD_SETVIEWPORTSTATE:
return "CMD_SETVIEWPORTSTATE";
case CMD_SETLINEWIDTHSTATE:
return "CMD_SETLINEWIDTHSTATE";
case CMD_SETDEPTHBIASSTATE:
return "CMD_SETDEPTHBIASSTATE";
case CMD_SETBLENDSTATE:
return "CMD_SETBLENDSTATE";
case CMD_SETDEPTHBOUNDSSTATE:
return "CMD_SETDEPTHBOUNDSSTATE";
case CMD_SETSTENCILREADMASKSTATE:
return "CMD_SETSTENCILREADMASKSTATE";
case CMD_SETSTENCILWRITEMASKSTATE:
return "CMD_SETSTENCILWRITEMASKSTATE";
case CMD_SETSTENCILREFERENCESTATE:
return "CMD_SETSTENCILREFERENCESTATE";
case CMD_BINDDESCRIPTORSETS:
return "CMD_BINDDESCRIPTORSETS";
case CMD_BINDINDEXBUFFER:
return "CMD_BINDINDEXBUFFER";
case CMD_BINDVERTEXBUFFER:
return "CMD_BINDVERTEXBUFFER";
case CMD_DRAW:
return "CMD_DRAW";
case CMD_DRAWINDEXED:
return "CMD_DRAWINDEXED";
case CMD_DRAWINDIRECT:
return "CMD_DRAWINDIRECT";
case CMD_DRAWINDEXEDINDIRECT:
return "CMD_DRAWINDEXEDINDIRECT";
case CMD_DISPATCH:
return "CMD_DISPATCH";
case CMD_DISPATCHINDIRECT:
return "CMD_DISPATCHINDIRECT";
case CMD_COPYBUFFER:
return "CMD_COPYBUFFER";
case CMD_COPYIMAGE:
return "CMD_COPYIMAGE";
case CMD_BLITIMAGE:
return "CMD_BLITIMAGE";
case CMD_COPYBUFFERTOIMAGE:
return "CMD_COPYBUFFERTOIMAGE";
case CMD_COPYIMAGETOBUFFER:
return "CMD_COPYIMAGETOBUFFER";
case CMD_CLONEIMAGEDATA:
return "CMD_CLONEIMAGEDATA";
case CMD_UPDATEBUFFER:
return "CMD_UPDATEBUFFER";
case CMD_FILLBUFFER:
return "CMD_FILLBUFFER";
case CMD_CLEARCOLORIMAGE:
return "CMD_CLEARCOLORIMAGE";
case CMD_CLEARATTACHMENTS:
return "CMD_CLEARCOLORATTACHMENT";
case CMD_CLEARDEPTHSTENCILIMAGE:
return "CMD_CLEARDEPTHSTENCILIMAGE";
case CMD_RESOLVEIMAGE:
return "CMD_RESOLVEIMAGE";
case CMD_SETEVENT:
return "CMD_SETEVENT";
case CMD_RESETEVENT:
return "CMD_RESETEVENT";
case CMD_WAITEVENTS:
return "CMD_WAITEVENTS";
case CMD_PIPELINEBARRIER:
return "CMD_PIPELINEBARRIER";
case CMD_BEGINQUERY:
return "CMD_BEGINQUERY";
case CMD_ENDQUERY:
return "CMD_ENDQUERY";
case CMD_RESETQUERYPOOL:
return "CMD_RESETQUERYPOOL";
case CMD_COPYQUERYPOOLRESULTS:
return "CMD_COPYQUERYPOOLRESULTS";
case CMD_WRITETIMESTAMP:
return "CMD_WRITETIMESTAMP";
case CMD_INITATOMICCOUNTERS:
return "CMD_INITATOMICCOUNTERS";
case CMD_LOADATOMICCOUNTERS:
return "CMD_LOADATOMICCOUNTERS";
case CMD_SAVEATOMICCOUNTERS:
return "CMD_SAVEATOMICCOUNTERS";
case CMD_BEGINRENDERPASS:
return "CMD_BEGINRENDERPASS";
case CMD_ENDRENDERPASS:
return "CMD_ENDRENDERPASS";
default:
return "UNKNOWN";
}
}
// SPIRV utility functions
static void build_def_index(shader_module *module) {
for (auto insn : *module) {
switch (insn.opcode()) {
// Types
case spv::OpTypeVoid:
case spv::OpTypeBool:
case spv::OpTypeInt:
case spv::OpTypeFloat:
case spv::OpTypeVector:
case spv::OpTypeMatrix:
case spv::OpTypeImage:
case spv::OpTypeSampler:
case spv::OpTypeSampledImage:
case spv::OpTypeArray:
case spv::OpTypeRuntimeArray:
case spv::OpTypeStruct:
case spv::OpTypeOpaque:
case spv::OpTypePointer:
case spv::OpTypeFunction:
case spv::OpTypeEvent:
case spv::OpTypeDeviceEvent:
case spv::OpTypeReserveId:
case spv::OpTypeQueue:
case spv::OpTypePipe:
module->def_index[insn.word(1)] = insn.offset();
break;
// Fixed constants
case spv::OpConstantTrue:
case spv::OpConstantFalse:
case spv::OpConstant:
case spv::OpConstantComposite:
case spv::OpConstantSampler:
case spv::OpConstantNull:
module->def_index[insn.word(2)] = insn.offset();
break;
// Specialization constants
case spv::OpSpecConstantTrue:
case spv::OpSpecConstantFalse:
case spv::OpSpecConstant:
case spv::OpSpecConstantComposite:
case spv::OpSpecConstantOp:
module->def_index[insn.word(2)] = insn.offset();
break;
// Variables
case spv::OpVariable:
module->def_index[insn.word(2)] = insn.offset();
break;
// Functions
case spv::OpFunction:
module->def_index[insn.word(2)] = insn.offset();
break;
default:
// We don't care about any other defs for now.
break;
}
}
}
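// Illustrative sketch (assumes 'fn' is an iterator at an OpFunction in 'module'): with
// def_index built, later analysis can chase operand <id>s without rescanning the module:
//
//     auto ret_type = module->get_def(fn.word(1));  // word 1 of OpFunction = result type <id>
//     if (ret_type != module->end() && ret_type.opcode() == spv::OpTypeVoid) {
//         // the function returns void
//     }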
static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
for (auto insn : *src) {
if (insn.opcode() == spv::OpEntryPoint) {
auto entrypointName = (char const *)&insn.word(3);
auto entrypointStageBits = 1u << insn.word(1);