forked from cg-tuwien/Auto-Vk
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsync.cpp
More file actions
393 lines (361 loc) · 18.3 KB
/
sync.cpp
File metadata and controls
393 lines (361 loc) · 18.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
#include <avk/avk.hpp>
namespace avk
{
// Static, class-wide state shared by all sync instances:
command_pool sync::sPoolToAllocCommandBuffersFrom; // Pool that get_or_create_command_buffer() allocates from
queue* sync::sQueueToUse; // Queue that queue_to_use() currently returns unconditionally (see its implementation)
void sync::presets::default_handler_before_operation(command_buffer_t& aCommandBuffer, pipeline_stage aDestinationStage, std::optional<read_memory_access> aDestinationAccess)
{
    // The preceding operation is unknown at this point. Therefore, be maximally
    // conservative: wait on ALL previously submitted commands, and make every
    // prior write available before the operation's read access becomes visible.
    const auto waitForStages = pipeline_stage::all_commands;
    const auto flushWrites   = write_memory_access{memory_access::any_write_access};
    aCommandBuffer.establish_global_memory_barrier_rw(waitForStages, aDestinationStage, flushWrites, aDestinationAccess);
}
void sync::presets::default_handler_after_operation(command_buffer_t& aCommandBuffer, pipeline_stage aSourceStage, std::optional<write_memory_access> aSourceAccess)
{
    // The subsequent operation is unknown at this point. Therefore, be maximally
    // conservative: block ALL later commands until the operation has finished,
    // and make its writes available and visible to every kind of read access.
    const auto blockStages      = pipeline_stage::all_commands;
    const auto makeReadsVisible = read_memory_access{memory_access::any_read_access};
    aCommandBuffer.establish_global_memory_barrier_rw(aSourceStage, blockStages, aSourceAccess, makeReadsVisible);
}
// Creates a before-operation handler for an image copy: transitions the destination
// image into transfer-dst layout and the source image into transfer-src layout, while
// waiting for any previous writes to the source image to complete.
// NOTE: The returned lambda captures both images by reference; the caller must
// ensure they outlive the handler's invocation.
avk::unique_function<void(command_buffer_t&, pipeline_stage, std::optional<read_memory_access>)> sync::presets::image_copy::wait_for_previous_operations(avk::image_t& aSourceImage, avk::image_t& aDestinationImage)
{
    return [&aSourceImage, &aDestinationImage](command_buffer_t& aCommandBuffer, pipeline_stage aDestinationStage, std::optional<read_memory_access> aDestinationAccess) {
        // Must transfer the swap chain image's layout:
        aDestinationImage.set_target_layout(vk::ImageLayout::eTransferDstOptimal);
        aCommandBuffer.establish_image_memory_barrier(
            aDestinationImage,
            pipeline_stage::top_of_pipe,         // Wait for nothing
            pipeline_stage::transfer,            // Unblock TRANSFER after the layout transition is done
            std::optional<memory_access>{},      // No pending writes to flush out
            memory_access::transfer_write_access // Transfer write access must have all required memory visible
        );
        // But, IMPORTANT: must also wait for writing to the image to complete!
        // Fixed: the COPY SOURCE must be in eTransferSrcOptimal (it previously was
        // set to eTransferDstOptimal, which is not a valid source layout for
        // vkCmdCopyImage -- only eTransferSrcOptimal or eGeneral are).
        aSourceImage.set_target_layout(vk::ImageLayout::eTransferSrcOptimal);
        aCommandBuffer.establish_image_memory_barrier_rw(
            aSourceImage,
            pipeline_stage::all_commands, /* -> */ aDestinationStage, // Wait for all previous commands before continuing with the operation's command
            write_memory_access{memory_access::any_write_access},     // Make any write access available, ...
            aDestinationAccess                                        // ... before making the operation's read access type visible
        );
    };
}
// Creates an after-operation handler for an image copy: transitions the destination
// image from transfer-dst into color-attachment layout and makes the copied data
// available/visible to all subsequent commands.
// Fixed: the previous version init-captured `originalLayout = aDestinationImage.current_layout()`
// but never used it -- the dead capture has been removed. (If restoring the pre-copy
// layout was the original intention, the transition below would have to target
// originalLayout instead of eColorAttachmentOptimal -- TODO confirm.)
// NOTE: aDestinationImage is captured by reference and must outlive the handler.
avk::unique_function<void(command_buffer_t&, pipeline_stage, std::optional<write_memory_access>)> sync::presets::image_copy::let_subsequent_operations_wait(avk::image_t& aSourceImage, avk::image_t& aDestinationImage)
{
    return [&aDestinationImage](command_buffer_t& aCommandBuffer, pipeline_stage aSourceStage, std::optional<write_memory_access> aSourceAccess){
        assert(vk::ImageLayout::eTransferDstOptimal == aDestinationImage.current_layout());
        aDestinationImage.set_target_layout(vk::ImageLayout::eColorAttachmentOptimal); // From transfer-dst into color attachment optimal for further rendering
        aCommandBuffer.establish_image_memory_barrier(
            aDestinationImage,
            pipeline_stage::transfer,             // When the TRANSFER has completed
            pipeline_stage::all_commands,         // Afterwards come further commands
            memory_access::transfer_write_access, // Copied memory must be available
            memory_access::any_access             // Data must be visible to any read and before any write access
        );
    };
}
// Creates an after-operation handler for an image copy whose destination goes
// straight to presentation: transitions the destination image from transfer-dst
// into present-src layout. No memory-access sync towards present is specified,
// since presentation is synchronized via a semaphore anyway.
// Fixed: only aDestinationImage is used inside the handler, so the unused
// &aSourceImage capture (which triggers -Wunused-lambda-capture) was removed.
// NOTE: aDestinationImage is captured by reference and must outlive the handler.
avk::unique_function<void(command_buffer_t&, pipeline_stage, std::optional<write_memory_access>)> sync::presets::image_copy::directly_into_present(avk::image_t& aSourceImage, avk::image_t& aDestinationImage)
{
    return [&aDestinationImage](command_buffer_t& aCommandBuffer, pipeline_stage aSourceStage, std::optional<write_memory_access> aSourceAccess){
        assert(vk::ImageLayout::eTransferDstOptimal == aDestinationImage.current_layout());
        aDestinationImage.set_target_layout(vk::ImageLayout::ePresentSrcKHR); // From transfer-dst into present-src layout
        aCommandBuffer.establish_image_memory_barrier(
            aDestinationImage,
            pipeline_stage::transfer,             // When the TRANSFER has completed
            pipeline_stage::bottom_of_pipe,       // Afterwards comes the semaphore -> present
            memory_access::transfer_write_access, // Copied memory must be available
            std::optional<memory_access>{}        // Present does not need any memory access specified, it's synced with a semaphore anyways.
        );
        // No further sync required
    };
}
// Move constructor: adopts all of aOther's state, then explicitly resets aOther
// so that its destructor will not warn about an unsubmitted command buffer or
// never-invoked barrier callbacks.
sync::sync(sync&& aOther) noexcept
    : mSpecialSync{ std::move(aOther.mSpecialSync) }
    , mCommandbufferRequest { std::move(aOther.mCommandbufferRequest) }
    , mSemaphoreLifetimeHandler{ std::move(aOther.mSemaphoreLifetimeHandler) }
    , mWaitBeforeSemaphores{ std::move(aOther.mWaitBeforeSemaphores) }
    , mCommandBufferRefOrLifetimeHandler{ std::move(aOther.mCommandBufferRefOrLifetimeHandler) }
    , mCommandBuffer{ std::move(aOther.mCommandBuffer) }
    , mEstablishBarrierBeforeOperationCallback{ std::move(aOther.mEstablishBarrierBeforeOperationCallback) }
    , mEstablishBarrierAfterOperationCallback{ std::move(aOther.mEstablishBarrierAfterOperationCallback) }
    , mQueueToUse{ std::move(aOther.mQueueToUse) }
    , mQueueRecommendation{ std::move(aOther.mQueueRecommendation) }
{
    // Moved-from members are only "valid but unspecified" => bring aOther into a
    // well-defined empty state:
    aOther.mSpecialSync = sync_type::not_required;
    aOther.mSemaphoreLifetimeHandler = {};
    aOther.mWaitBeforeSemaphores.clear();
    aOther.mCommandBufferRefOrLifetimeHandler = {};
    aOther.mCommandBuffer.reset();
    aOther.mEstablishBarrierBeforeOperationCallback = {};
    aOther.mEstablishBarrierAfterOperationCallback = {};
    aOther.mQueueToUse.reset();
    aOther.mQueueRecommendation.reset();
    // NOTE(review): aOther.mCommandbufferRequest is not reset here (nor in the
    // move assignment) -- presumably fine for a plain value type, but confirm.
}
// Move assignment: adopts all of aOther's state, then resets aOther to a
// well-defined empty state (mirrors the move constructor).
// Fixed: added a self-move-assignment guard -- without it, the resets of aOther
// (== *this) at the end would wipe the instance's just-assigned state.
sync& sync::operator=(sync&& aOther) noexcept
{
    if (this != &aOther) {
        mSpecialSync = std::move(aOther.mSpecialSync);
        mCommandbufferRequest = std::move(aOther.mCommandbufferRequest);
        mSemaphoreLifetimeHandler = std::move(aOther.mSemaphoreLifetimeHandler);
        mWaitBeforeSemaphores = std::move(aOther.mWaitBeforeSemaphores);
        mCommandBufferRefOrLifetimeHandler = std::move(aOther.mCommandBufferRefOrLifetimeHandler);
        mCommandBuffer = std::move(aOther.mCommandBuffer);
        mEstablishBarrierBeforeOperationCallback = std::move(aOther.mEstablishBarrierBeforeOperationCallback);
        mEstablishBarrierAfterOperationCallback = std::move(aOther.mEstablishBarrierAfterOperationCallback);
        mQueueToUse = std::move(aOther.mQueueToUse);
        mQueueRecommendation = std::move(aOther.mQueueRecommendation);
        // Bring the moved-from aOther into a well-defined empty state:
        aOther.mSpecialSync = sync_type::not_required;
        aOther.mSemaphoreLifetimeHandler = {};
        aOther.mWaitBeforeSemaphores.clear();
        aOther.mCommandBufferRefOrLifetimeHandler = {};
        aOther.mCommandBuffer.reset();
        aOther.mEstablishBarrierBeforeOperationCallback = {};
        aOther.mEstablishBarrierAfterOperationCallback = {};
        aOther.mQueueToUse.reset();
        aOther.mQueueRecommendation.reset();
    }
    return *this;
}
// Destructor: emits diagnostics for state that indicates misuse -- a command
// buffer that was recorded but never submitted/fetched, and (in debug builds)
// barrier callbacks that were never invoked.
sync::~sync()
{
    if (mCommandBuffer.has_value()) {
        if (get_sync_type() == sync_type::by_return) {
            AVK_LOG_ERROR("Sync is requested 'by_return', but command buffer has not been fetched.");
        }
        else {
            AVK_LOG_ERROR("Command buffer has not been submitted but ak::sync instance is destructed. This must be a bug.");
        }
    }
#ifdef _DEBUG
    if (mEstablishBarrierBeforeOperationCallback) {
        AVK_LOG_DEBUG("The before-operation-barrier-callback has never been invoked for this ak::sync instance. This can be a bug, but it can be okay as well.");
    }
    // Fixed: this check previously re-tested mEstablishBarrierBeforeOperationCallback
    // (copy-paste bug), so a never-invoked AFTER-callback was never reported.
    if (mEstablishBarrierAfterOperationCallback) {
        AVK_LOG_DEBUG("The after-operation-barrier-callback has never been invoked for this ak::sync instance. This can be a bug, but it can be okay as well.");
    }
#endif
}
// Factory: the user explicitly declares that no synchronization is necessary.
sync sync::not_required()
{
    sync noSync;
    noSync.mSpecialSync = sync_type::not_required;
    return noSync;
}
// Factory: synchronize via a queue waitIdle.
// @param aDontWarn If true, marks the wait-idle as deliberate, which suppresses
//                  the warning otherwise emitted at submission time.
sync sync::wait_idle(bool aDontWarn)
{
    sync waitIdleSync;
    if (aDontWarn) {
        waitIdleSync.mSpecialSync = sync_type::via_wait_idle_deliberately;
    }
    return waitIdleSync;
}
// Creates an auxiliary sync instance which records into aMasterSync's command
// buffer (it does not own one itself) and uses the given barrier handlers.
// The handlers may be special marker values requesting to "steal" the master's
// corresponding handler, either immediately or lazily on first invocation.
// @param aMasterSync Master sync instance; captured by reference in the
//        on-demand-steal lambdas => must outlive the returned auxiliary sync.
// @param aEstablishBarrierBeforeOperation Before-operation barrier handler (or steal marker).
// @param aEstablishBarrierAfterOperation  After-operation barrier handler (or steal marker).
sync sync::auxiliary_with_barriers(
    sync& aMasterSync,
    unique_function<void(command_buffer_t&, pipeline_stage /* destination stage */, std::optional<read_memory_access> /* destination access */)> aEstablishBarrierBeforeOperation,
    unique_function<void(command_buffer_t&, pipeline_stage /* source stage */, std::optional<write_memory_access> /* source access */)> aEstablishBarrierAfterOperation
)
{
    // Perform some checks
    // Determine whether the passed-in handlers are steal markers (on demand vs. immediately):
    const auto stealBeforeHandlerOnDemand = is_about_to_steal_before_handler_on_demand(aEstablishBarrierBeforeOperation);
    const auto stealAfterHandlerOnDemand = is_about_to_steal_after_handler_on_demand(aEstablishBarrierAfterOperation);
    const auto stealBeforeHandlerImmediately = is_about_to_steal_before_handler_immediately(aEstablishBarrierBeforeOperation);
    const auto stealAfterHandlerImmediately = is_about_to_steal_after_handler_immediately(aEstablishBarrierAfterOperation);
    // A handler may be stolen on demand OR immediately, but never both at once:
    assert(2 != (static_cast<int>(stealBeforeHandlerImmediately) + static_cast<int>(stealBeforeHandlerOnDemand)));
    assert(2 != (static_cast<int>(stealAfterHandlerImmediately) + static_cast<int>(stealAfterHandlerOnDemand)));
    // Possibly steal something
    if (stealBeforeHandlerOnDemand) {
        // Defer the steal to invocation time: take the master's handler out,
        // invalidate it on the master, then invoke it (at most once).
        aEstablishBarrierBeforeOperation = [&aMasterSync](command_buffer_t& cb, pipeline_stage stage, std::optional<read_memory_access> access) {
            // Execute and invalidate:
            auto handler = std::move(aMasterSync.mEstablishBarrierBeforeOperationCallback);
            aMasterSync.mEstablishBarrierBeforeOperationCallback = {};
            if (handler) {
                handler(cb, stage, access);
            }
        };
    }
    else if (stealBeforeHandlerImmediately) {
        // Take over the master's handler right now and invalidate it on the master:
        aEstablishBarrierBeforeOperation = std::move(aMasterSync.mEstablishBarrierBeforeOperationCallback);
        aMasterSync.mEstablishBarrierBeforeOperationCallback = {};
    }
    if (stealAfterHandlerOnDemand) {
        // Same deferred-steal pattern for the after-operation handler:
        aEstablishBarrierAfterOperation = [&aMasterSync](command_buffer_t& cb, pipeline_stage stage, std::optional<write_memory_access> access) {
            // Execute and invalidate:
            auto handler = std::move(aMasterSync.mEstablishBarrierAfterOperationCallback);
            aMasterSync.mEstablishBarrierAfterOperationCallback = {};
            if (handler) {
                handler(cb, stage, access);
            }
        };
    }
    else if (stealAfterHandlerImmediately) {
        aEstablishBarrierAfterOperation = std::move(aMasterSync.mEstablishBarrierAfterOperationCallback);
        aMasterSync.mEstablishBarrierAfterOperationCallback = {};
    }
    // Prepare a shiny new sync instance
    sync result;
    result.mCommandBufferRefOrLifetimeHandler = std::ref(aMasterSync.get_or_create_command_buffer()); // <-- Set the command buffer reference, not the lifetime handler
    result.mEstablishBarrierAfterOperationCallback = std::move(aEstablishBarrierAfterOperation);
    result.mEstablishBarrierBeforeOperationCallback = std::move(aEstablishBarrierBeforeOperation);
    // Queues may not be used anyways by auxiliary sync instances:
    result.mQueueToUse = {};
    result.mQueueRecommendation = {};
    return result;
}
// Explicitly selects the queue to submit to; returns *this for fluent chaining.
sync& sync::on_queue(std::reference_wrapper<queue> aQueue)
{
    mQueueToUse = aQueue;
    return *this;
}
// Determines the effective synchronization strategy from this instance's state.
// Precedence: semaphore handler > barrier target > explicitly set special sync,
// with wait-idle as the final fallback.
sync::sync_type sync::get_sync_type() const
{
    if (mSemaphoreLifetimeHandler) {
        return sync_type::via_semaphore;
    }
    const bool hasBarrierTarget = !std::holds_alternative<std::monostate>(mCommandBufferRefOrLifetimeHandler);
    if (hasBarrierTarget) {
        return sync_type::via_barrier;
    }
    return mSpecialSync.value_or(sync_type::via_wait_idle);
}
// Returns the queue that submissions shall go to.
// NOTE(review): mQueueToUse and mQueueRecommendation are currently ignored (see
// the commented-out logic below); every submission goes to the statically
// configured sQueueToUse. Confirm this is intentional and not a leftover.
std::reference_wrapper<queue> sync::queue_to_use() const
{
//#if defined(_DEBUG) && LOG_LEVEL > 4
// if (!mQueueToUse.has_value()) {
// if (mQueueRecommendation.has_value()) {
// LOG_DEBUG_MEGA_VERBOSE(fmt::format("No queue specified => will submit to queue {} which was recommended by the operation. HTH.", mQueueRecommendation.value().get().queue_index()));
// }
// else {
// LOG_DEBUG_MEGA_VERBOSE("No queue specified => will submit to the graphics queue. HTH.");
// }
// }
//#endif
// //return mQueueToUse.value();
return *sQueueToUse;
}
// Requests that a reusable (multi-submit) command buffer be allocated; fluent.
sync& sync::create_reusable_commandbuffer()
{
    mCommandbufferRequest = commandbuffer_request::reusable;
    return *this;
}
// Requests that a single-use (one-time-submit) command buffer be allocated; fluent.
sync& sync::create_single_use_commandbuffer()
{
    mCommandbufferRequest = commandbuffer_request::single_use;
    return *this;
}
// Returns the command buffer to record into. Auxiliary sync instances forward to
// their master's command buffer; otherwise, a command buffer is lazily allocated
// from the static pool on first use and recording is started immediately.
command_buffer_t& sync::get_or_create_command_buffer()
{
    // Auxiliary case: a reference to the master's command buffer is stored.
    if (std::holds_alternative<std::reference_wrapper<command_buffer_t>>(mCommandBufferRefOrLifetimeHandler)) {
        return std::get<std::reference_wrapper<command_buffer_t>>(mCommandBufferRefOrLifetimeHandler).get();
    }
    // Own-buffer case: allocate lazily according to the requested usage.
    if (!mCommandBuffer.has_value()) {
        if (commandbuffer_request::reusable == mCommandbufferRequest) {
            mCommandBuffer = sPoolToAllocCommandBuffersFrom->alloc_command_buffer({});
        }
        else {
            // Default (incl. single_use): one-time-submit usage.
            mCommandBuffer = sPoolToAllocCommandBuffersFrom->alloc_command_buffer(vk::CommandBufferUsageFlagBits::eOneTimeSubmit);
        }
        mCommandBuffer.value()->begin_recording(); // Immediately start recording
    }
    return mCommandBuffer.value();
}
//std::reference_wrapper<queue> sync::queue_to_transfer_to() const
//{
// return mQueueToTransferOwnershipTo.value_or(queue_to_use());
//}
//
//bool sync::queues_are_the_same() const
//{
// queue& q0 = queue_to_use();
// queue& q1 = queue_to_transfer_to();
// return q0 == q1;
//}
// Stores a queue recommendation provided by an operation (a hint, not a command).
void sync::set_queue_hint(std::reference_wrapper<queue> aQueueRecommendation)
{
    mQueueRecommendation = aQueueRecommendation;
}
// Invokes the before-operation barrier callback (if any) exactly once, recording
// into this sync's command buffer, then clears the callback so the destructor's
// debug check does not fire.
void sync::establish_barrier_before_the_operation(pipeline_stage aDestinationPipelineStages, std::optional<read_memory_access> aDestinationMemoryStages)
{
    if (mEstablishBarrierBeforeOperationCallback) {
        mEstablishBarrierBeforeOperationCallback(get_or_create_command_buffer(), aDestinationPipelineStages, aDestinationMemoryStages);
        mEstablishBarrierBeforeOperationCallback = {}; // one-shot: never invoke twice
    }
}
// Invokes the after-operation barrier callback (if any) exactly once, recording
// into this sync's command buffer, then clears the callback so the destructor's
// debug check does not fire.
void sync::establish_barrier_after_the_operation(pipeline_stage aSourcePipelineStages, std::optional<write_memory_access> aSourceMemoryStages)
{
    if (mEstablishBarrierAfterOperationCallback) {
        mEstablishBarrierAfterOperationCallback(get_or_create_command_buffer(), aSourcePipelineStages, aSourceMemoryStages);
        mEstablishBarrierAfterOperationCallback = {}; // one-shot: never invoke twice
    }
}
std::optional<command_buffer> sync::submit_and_sync()
{
queue& queue = queue_to_use();
auto syncType = get_sync_type();
switch (syncType) {
case sync_type::via_semaphore:
{
assert(mSemaphoreLifetimeHandler);
assert(mCommandBuffer.has_value());
mCommandBuffer.value()->establish_global_memory_barrier(
pipeline_stage::all_commands,
pipeline_stage::all_commands,
std::optional<memory_access>{memory_access::any_access},
std::optional<memory_access>{memory_access::any_access});
mCommandBuffer.value()->end_recording(); // What started in get_or_create_command_buffer() ends here.
auto sema = queue.submit_and_handle_with_semaphore(std::move(mCommandBuffer.value()), std::move(mWaitBeforeSemaphores));
mSemaphoreLifetimeHandler(std::move(sema)); // Transfer ownership and be done with it
mCommandBuffer.reset(); // Command buffer has been moved from. It's gone.
mWaitBeforeSemaphores.clear(); // Never ever use them again (they have been moved from)
}
break;
case sync_type::via_barrier:
assert(!std::holds_alternative<std::monostate>(mCommandBufferRefOrLifetimeHandler));
if (std::holds_alternative<unique_function<void(command_buffer)>>(mCommandBufferRefOrLifetimeHandler)) {
assert(mCommandBuffer.has_value());
mCommandBuffer.value()->end_recording(); // What started in get_or_create_command_buffer() ends here.
queue.submit(mCommandBuffer.value());
std::get<unique_function<void(command_buffer)>>(mCommandBufferRefOrLifetimeHandler)(std::move(mCommandBuffer.value())); // Transfer ownership and be done with it.
mCommandBuffer.reset(); // Command buffer has been moved from. It's gone.
}
else { // Must mean that we are an auxiliary sync handler
assert(std::holds_alternative<std::reference_wrapper<command_buffer_t>>(mCommandBufferRefOrLifetimeHandler));
// ... that means: Nothing to do here. Master sync will submit the command buffer.
}
break;
case sync_type::via_wait_idle:
AVK_LOG_WARNING("Performing waitIdle on queue " + std::to_string(queue_to_use().get().queue_index()) + " in order to sync because no other type of handler is present.");
case sync_type::via_wait_idle_deliberately:
assert(mCommandBuffer.has_value());
mCommandBuffer.value()->end_recording(); // What started in get_or_create_command_buffer() ends here.
queue.submit(mCommandBuffer.value());
queue_to_use().get().handle().waitIdle();
mCommandBuffer.reset(); // Command buffer is fully handled after waitIdle() and can be destroyed.
break;
case sync_type::not_required:
assert(false);
throw avk::runtime_error("You were wrong with your assumption that there was no sync required! => Provide a concrete sync strategy!");
case sync_type::by_return:
{
if (!mCommandBuffer.has_value()) {
throw avk::runtime_error("Something went wrong. There is no command buffer.");
}
mCommandBuffer.value()->end_recording(); // What started in get_or_create_command_buffer() ends here.
auto tmp = std::move(mCommandBuffer.value());
mCommandBuffer.reset();
return std::move(tmp);
}
case sync_type::by_existing_command_buffer:
// All good, everything's handled outside.
return {};
default:
assert(false);
throw avk::logic_error("unknown syncType");
}
assert(!mCommandBuffer.has_value());
return {};
}
}