/*
* Copyright (C) 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef VK_CALLBACK_SWAPCHAIN_LAYER_H_
#define VK_CALLBACK_SWAPCHAIN_LAYER_H_

#include <unordered_map>
#include <vector>

#include "vulkan/vulkan.h"

#include "threading.h"

namespace swapchain {

// All of the instance data that is needed for book-keeping in a layer.
struct InstanceData {
  PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
  PFN_vkDestroyInstance vkDestroyInstance;
  PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;
  PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;
  PFN_vkCreateDevice vkCreateDevice;
  PFN_vkGetPhysicalDeviceQueueFamilyProperties
      vkGetPhysicalDeviceQueueFamilyProperties;
  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;

  // All of the physical devices associated with this instance.
  std::vector<VkPhysicalDevice> physical_devices_;
};
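
// Illustrative sketch (not part of the original layer): a dispatch table like
// InstanceData is typically filled in at vkCreateInstance time from the next
// layer's vkGetInstanceProcAddr. The function and parameter names below
// (ExamplePopulateInstanceData, next_proc) are assumptions for illustration,
// and only a few entries are shown.
inline void ExamplePopulateInstanceData(VkInstance instance,
                                        PFN_vkGetInstanceProcAddr next_proc,
                                        InstanceData* data) {
  data->vkGetInstanceProcAddr = next_proc;
  data->vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(
      next_proc(instance, "vkDestroyInstance"));
  data->vkEnumeratePhysicalDevices =
      reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
          next_proc(instance, "vkEnumeratePhysicalDevices"));
  // ... and so on for the remaining entry points.
}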

// All of the command buffer data that is needed for book-keeping in our layer.
struct CommandBufferData {
  VkDevice device_;
  PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
  PFN_vkCmdWaitEvents vkCmdWaitEvents;
};

// All of the physical device data needed for book-keeping in our layer.
struct PhysicalDeviceData {
  // The instance that this physical device belongs to.
  VkInstance instance_;
  VkPhysicalDeviceMemoryProperties memory_properties_;
  VkPhysicalDeviceProperties physical_device_properties_;
};
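
// Illustrative sketch (not part of the original layer): the cached properties
// above are typically queried once through the instance dispatch table when a
// physical device is first seen. ExamplePopulatePhysicalDeviceData is an
// assumed name used only for this example.
inline void ExamplePopulatePhysicalDeviceData(VkInstance instance,
                                              const InstanceData& instance_data,
                                              VkPhysicalDevice physical_device,
                                              PhysicalDeviceData* data) {
  data->instance_ = instance;
  instance_data.vkGetPhysicalDeviceMemoryProperties(physical_device,
                                                    &data->memory_properties_);
  instance_data.vkGetPhysicalDeviceProperties(
      physical_device, &data->physical_device_properties_);
}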

// All of the device data we need for book-keeping.
struct DeviceData {
  VkPhysicalDevice physicalDevice;
  PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
  PFN_vkGetDeviceQueue vkGetDeviceQueue;
  PFN_vkAllocateMemory vkAllocateMemory;
  PFN_vkFreeMemory vkFreeMemory;
  PFN_vkMapMemory vkMapMemory;
  PFN_vkUnmapMemory vkUnmapMemory;
  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
  PFN_vkCreateFence vkCreateFence;
  PFN_vkGetFenceStatus vkGetFenceStatus;
  PFN_vkWaitForFences vkWaitForFences;
  PFN_vkDestroyFence vkDestroyFence;
  PFN_vkResetFences vkResetFences;
  PFN_vkCreateImage vkCreateImage;
  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
  PFN_vkBindImageMemory vkBindImageMemory;
  PFN_vkDestroyImage vkDestroyImage;
  PFN_vkCreateBuffer vkCreateBuffer;
  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
  PFN_vkBindBufferMemory vkBindBufferMemory;
  PFN_vkDestroyBuffer vkDestroyBuffer;
  PFN_vkCreateCommandPool vkCreateCommandPool;
  PFN_vkDestroyCommandPool vkDestroyCommandPool;
  PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;
  PFN_vkFreeCommandBuffers vkFreeCommandBuffers;
  PFN_vkBeginCommandBuffer vkBeginCommandBuffer;
  PFN_vkEndCommandBuffer vkEndCommandBuffer;
  PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;
  PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
  PFN_vkCmdWaitEvents vkCmdWaitEvents;
  PFN_vkCreateRenderPass vkCreateRenderPass;
  PFN_vkQueueSubmit vkQueueSubmit;
  PFN_vkDestroyDevice vkDestroyDevice;
};
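
// Illustrative sketch (not part of the original layer): the device-side
// analogue of the instance example above. A DeviceData dispatch table is
// typically filled in at vkCreateDevice time from the next layer's
// vkGetDeviceProcAddr. ExamplePopulateDeviceData and next_proc are assumed
// names; only a few entries are shown.
inline void ExamplePopulateDeviceData(VkDevice device,
                                      VkPhysicalDevice physical_device,
                                      PFN_vkGetDeviceProcAddr next_proc,
                                      DeviceData* data) {
  data->physicalDevice = physical_device;
  data->vkGetDeviceProcAddr = next_proc;
  data->vkQueueSubmit = reinterpret_cast<PFN_vkQueueSubmit>(
      next_proc(device, "vkQueueSubmit"));
  data->vkDestroyDevice = reinterpret_cast<PFN_vkDestroyDevice>(
      next_proc(device, "vkDestroyDevice"));
  // ... and so on for the remaining entry points.
}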

struct QueueData {
  VkDevice device_;
  PFN_vkQueueSubmit vkQueueSubmit;
};

// All context functions return a context token. Any data within a
// ContextToken is only valid for the lifetime of the ContextToken.
template <typename T>
struct ContextToken {
  ContextToken(T& object, threading::mutex& locker)
      : object_(object), context_lock_(locker) {}
  ContextToken(T& object, std::unique_lock<threading::mutex>&& locker)
      : object_(object), context_lock_(std::move(locker)) {}
  ContextToken(ContextToken&& _other)
      : object_(_other.object_),
        context_lock_(std::move(_other.context_lock_)) {}

  ContextToken(const ContextToken& _other) = delete;
  ContextToken& operator=(const ContextToken& _other) = delete;

  const T* operator->() const { return &object_; }
  const T& operator*() const { return object_; }
  T* operator->() { return &object_; }
  T& operator*() { return object_; }

 private:
  T& object_;
  std::unique_lock<threading::mutex> context_lock_;
};
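
// Illustrative sketch (not part of the original layer): a ContextToken is a
// scoped accessor. The lock is taken when the token is constructed and held
// until the token is destroyed, so the wrapped object may only be touched
// while the token is alive. ExampleHasDevices, devices, and devices_lock are
// assumed names used only for this example.
inline bool ExampleHasDevices(std::vector<VkPhysicalDevice>& devices,
                              threading::mutex& devices_lock) {
  ContextToken<std::vector<VkPhysicalDevice>> token(devices, devices_lock);
  // Safe to read `devices` here: devices_lock is held by `token`.
  return !token->empty();
}  // devices_lock is released when `token` goes out of scope.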

// In order to prevent deadlocks when multiple maps/objects are needed at
// once, these locks should always be acquired in order from the most
// specific to the least specific:
//   CommandBuffer -> Queue -> Device -> PhysicalDevice -> Instance
// It is valid to acquire only a subset (e.g. Queue -> PhysicalDevice), but
// never valid to acquire them in the reverse order.
struct Context {
  ~Context() {}

  using InstanceMap = std::unordered_map<VkInstance, InstanceData>;
  using CommandBufferMap =
      std::unordered_map<VkCommandBuffer, CommandBufferData>;
  using PhysicalDeviceMap =
      std::unordered_map<VkPhysicalDevice, PhysicalDeviceData>;
  using QueueMap = std::unordered_map<VkQueue, QueueData>;
  using DeviceMap = std::unordered_map<VkDevice, DeviceData>;

  ContextToken<InstanceMap> GetInstanceMap() {
    return ContextToken<InstanceMap>(instance_data_map_, instance_lock_);
  }

  ContextToken<CommandBufferMap> GetCommandBufferMap() {
    return ContextToken<CommandBufferMap>(command_buffer_data_map_,
                                          command_buffer_lock_);
  }

  ContextToken<QueueMap> GetQueueMap() {
    return ContextToken<QueueMap>(queue_data_map_, queue_lock_);
  }

  ContextToken<PhysicalDeviceMap> GetPhysicalDeviceMap() {
    return ContextToken<PhysicalDeviceMap>(physical_device_data_map_,
                                           physical_device_lock_);
  }

  ContextToken<DeviceMap> GetDeviceMap() {
    return ContextToken<DeviceMap>(device_data_map_, device_lock_);
  }

  ContextToken<InstanceData> GetInstanceData(VkInstance instance) {
    std::unique_lock<threading::mutex> locker(instance_lock_);
    return ContextToken<InstanceData>(instance_data_map_.at(instance),
                                      std::move(locker));
  }

  ContextToken<CommandBufferData> GetCommandBufferData(VkCommandBuffer buffer) {
    std::unique_lock<threading::mutex> locker(command_buffer_lock_);
    return ContextToken<CommandBufferData>(command_buffer_data_map_.at(buffer),
                                           std::move(locker));
  }

  ContextToken<QueueData> GetQueueData(VkQueue queue) {
    std::unique_lock<threading::mutex> locker(queue_lock_);
    return ContextToken<QueueData>(queue_data_map_.at(queue),
                                   std::move(locker));
  }

  ContextToken<PhysicalDeviceData> GetPhysicalDeviceData(
      VkPhysicalDevice physical_device) {
    std::unique_lock<threading::mutex> locker(physical_device_lock_);
    return ContextToken<PhysicalDeviceData>(
        physical_device_data_map_.at(physical_device), std::move(locker));
  }

  ContextToken<DeviceData> GetDeviceData(VkDevice device) {
    std::unique_lock<threading::mutex> locker(device_lock_);
    return ContextToken<DeviceData>(device_data_map_.at(device),
                                    std::move(locker));
  }

 private:
  // Map of instances to their data. Our other option would be to wrap the
  // instance object, but then we would have to handle every possible instance
  // function.
  InstanceMap instance_data_map_;
  // Lock for use when reading/writing from instance_data_map_.
  threading::mutex instance_lock_;

  // The global map of command buffers to their data.
  CommandBufferMap command_buffer_data_map_;
  // Lock for use when reading/writing from command_buffer_data_map_.
  threading::mutex command_buffer_lock_;

  // The global map of physical devices to their data.
  // This should be locked along with the related instance.
  PhysicalDeviceMap physical_device_data_map_;
  threading::mutex physical_device_lock_;

  // A map from queues to their data.
  QueueMap queue_data_map_;
  // A lock around queue operations. This is needed in the callback swapchain
  // because we have to submit to a queue ourselves, but cannot guarantee that
  // the application is not submitting to the same queue at the same time, so
  // we lock around all queue operations.
  threading::mutex queue_lock_;

  // The global map of devices to their data.
  DeviceMap device_data_map_;
  // Lock for use when reading/writing from device_data_map_.
  threading::mutex device_lock_;
};

Context& GetGlobalContext();
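
// Illustrative sketch (not part of the original layer): an intercepted call
// typically looks up its book-keeping data through the global Context and
// forwards through the stored function pointer, following the lock order
// documented above (here Queue -> Device). ExampleQueueSubmit is an assumed
// name used only for this example; the device token is acquired purely to
// demonstrate the ordering.
inline VkResult ExampleQueueSubmit(VkQueue queue, uint32_t submit_count,
                                   const VkSubmitInfo* submits, VkFence fence) {
  auto queue_data = GetGlobalContext().GetQueueData(queue);  // takes queue_lock_
  auto device_data =  // takes device_lock_ while queue_lock_ is still held
      GetGlobalContext().GetDeviceData(queue_data->device_);
  return device_data->vkQueueSubmit(queue, submit_count, submits, fence);
}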

}  // namespace swapchain

#endif  // VK_CALLBACK_SWAPCHAIN_LAYER_H_