Vulkan backend work

ksuprynowicz 2024-09-08 09:50:22 +02:00 committed by Karol Suprynowicz
parent c0351af854
commit be1551c21c
10 changed files with 1233 additions and 129 deletions

File diff suppressed because it is too large.


@ -28,32 +28,174 @@
#include "VKForward.h"
#include "../../../../vk/src/vk/Context.h"
//#define GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
#define GPU_STEREO_TECHNIQUE_INSTANCED
// Let these be configured by the one define picked above
#ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SIMPLE
#define GPU_STEREO_DRAWCALL_DOUBLED
#endif
#ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
#define GPU_STEREO_DRAWCALL_DOUBLED
#define GPU_STEREO_CAMERA_BUFFER
#endif
#ifdef GPU_STEREO_TECHNIQUE_INSTANCED
#define GPU_STEREO_DRAWCALL_INSTANCED
#define GPU_STEREO_CAMERA_BUFFER
#endif
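// Hedged sketch of how the derived defines are meant to be consumed by the draw
// path (the names below are illustrative assumptions, not declarations in this file):
//   #ifdef GPU_STEREO_DRAWCALL_INSTANCED
//       // a single drawcall with a doubled instance count; the shader selects the eye
//       vkCmdDraw(cmd, numVertices, 2 * numInstances, startVertex, 0);
//   #else // GPU_STEREO_DRAWCALL_DOUBLED
//       // replay the drawcall once per eye, rebinding the per-eye camera in between
//       for (int eye = 0; eye < 2; ++eye) { bindCurrentCamera(eye); vkCmdDraw(cmd, numVertices, numInstances, startVertex, 0); }
//   #endif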
namespace gpu { namespace vulkan {
class VKInputFormat : public GPUObject {
public:
static VKInputFormat* sync(const Stream::Format& inputFormat);
VKInputFormat();
~VKInputFormat();
std::string key;
};
class VKBackend : public Backend, public std::enable_shared_from_this<VKBackend> {
// Context Backend static interface required
friend class gpu::Context;
static void init();
static BackendPointer createBackend();
protected:
// Allows for correction of the camera pose to account for changes
// between the time when a frame was recorded and the time(s) when it is
// executed.
// 'Prev' refers to the correction state used during the previous frame.
struct CameraCorrection {
mat4 correction;
mat4 correctionInverse;
mat4 prevView;
mat4 prevViewInverse;
};
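// Assumed behaviour (hedged, mirroring the GL backends): the correction and its
// inverse are composed with the view captured in prevView/prevViewInverse, so a
// frame recorded against an older camera pose can be replayed against the latest one.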
struct TransformStageState {
#ifdef GPU_STEREO_CAMERA_BUFFER
struct Cameras {
TransformCamera _cams[2];
Cameras(){};
Cameras(const TransformCamera& cam) { _cams[0] = cam; };
Cameras(const TransformCamera& camL, const TransformCamera& camR) {
_cams[0] = camL;
_cams[1] = camR;
};
};
using CameraBufferElement = Cameras;
#else
using CameraBufferElement = TransformCamera;
#endif
using TransformCameras = std::vector<CameraBufferElement>;
TransformCamera _camera;
TransformCameras _cameras;
mutable std::map<std::string, void*> _drawCallInfoOffsets;
uint32_t _objectBuffer{ 0 };
uint32_t _cameraBuffer{ 0 };
uint32_t _drawCallInfoBuffer{ 0 };
uint32_t _objectBufferTexture{ 0 };
size_t _cameraUboSize{ 0 };
bool _viewIsCamera{ false };
bool _skybox{ false };
Transform _view;
CameraCorrection _correction;
bool _viewCorrectionEnabled{ true };
Mat4 _projection;
Vec4i _viewport{ 0, 0, 1, 1 };
Vec2 _depthRange{ 0.0f, 1.0f };
Vec2 _projectionJitter{ 0.0f, 0.0f };
bool _invalidView{ false };
bool _invalidProj{ false };
bool _invalidViewport{ false };
bool _enabledDrawcallInfoBuffer{ false };
using Pair = std::pair<size_t, size_t>;
using List = std::list<Pair>;
List _cameraOffsets;
mutable List::const_iterator _camerasItr;
mutable size_t _currentCameraOffset{ INVALID_OFFSET };
void preUpdate(size_t commandIndex, const StereoState& stereo, Vec2u framebufferSize);
void update(size_t commandIndex, const StereoState& stereo) const;
void bindCurrentCamera(int stereoSide) const;
} _transform;
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
// The drawcall info attribute channel is reserved and is the upper bound for the number of available input buffers
static const int MAX_NUM_INPUT_BUFFERS = Stream::DRAW_CALL_INFO;
struct InputStageState {
bool _invalidFormat { true };
bool _lastUpdateStereoState { false };
FormatReference _format { GPU_REFERENCE_INIT_VALUE };
std::string _formatKey;
typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
ActivationCache _attributeActivation { 0 };
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;
BuffersState _invalidBuffers { 0 };
BuffersState _attribBindingBuffers { 0 };
std::array<BufferReference, MAX_NUM_INPUT_BUFFERS> _buffers;
std::array<Offset, MAX_NUM_INPUT_BUFFERS> _bufferOffsets;
std::array<Offset, MAX_NUM_INPUT_BUFFERS> _bufferStrides;
std::array<uint32_t, MAX_NUM_INPUT_BUFFERS> _bufferVBOs;
BufferReference _indexBuffer;
Offset _indexBufferOffset { 0 };
Type _indexBufferType { UINT32 };
BufferReference _indirectBuffer;
Offset _indirectBufferOffset { 0 };
Offset _indirectBufferStride { 0 };
uint32_t _defaultVAO { 0 };
} _input;
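// Hedged sketch of how updateInput() is expected to consume this state (the Vulkan
// calls below are assumptions, not yet wired up here): entries flagged in
// _invalidBuffers would be flushed with vkCmdBindVertexBuffers using _bufferOffsets
// (strides belong to the pipeline's vertex input state), and _indexBuffer would be
// bound with vkCmdBindIndexBuffer, mapping _indexBufferType to
// VK_INDEX_TYPE_UINT16 or VK_INDEX_TYPE_UINT32.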
void draw(VkPrimitiveTopology mode, uint32 numVertices, uint32 startVertex);
void renderPassTransfer(Batch& batch);
void renderPassDraw(const Batch& batch);
void transferTransformState(const Batch& batch) const;
void updateInput();
void updateTransform(const Batch& batch);
void updatePipeline();
vulkan::VKFramebuffer* syncGPUObject(const Framebuffer& framebuffer);
VKBuffer* syncGPUObject(const Buffer& buffer);
VKTexture* syncGPUObject(const TexturePointer& texture);
VKQuery* syncGPUObject(const Query& query);
public:
VKBackend();
~VKBackend();
void syncProgram(const gpu::ShaderPointer& program) override {}
void syncCache() override {}
void recycle() const override {}
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false) override {}
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false) override;
uint32_t getTextureID(const TexturePointer&) override { return 0; }
void executeFrame(const FramePointer& frame) final override;
void executeFrame(const FramePointer& frame) final;
bool isTextureManagementSparseEnabled() const override;
bool supportedTextureFormat(const gpu::Element& format) const override;
const std::string& getVersion() const override;
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final override {}
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final;
void setDrawCommandBuffer(VkCommandBuffer commandBuffer);
void trash(const VKBuffer& buffer);
#if 0
// Draw Stage
virtual void do_draw(const Batch& batch, size_t paramOffset) final;
virtual void do_drawIndexed(const Batch& batch, size_t paramOffset) final;
@ -73,7 +215,6 @@ public:
virtual void do_glUniform2f(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform3f(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform4f(const Batch& batch, size_t paramOffset) final;
virtual void do_glColor4f(const Batch& batch, size_t paramOffset) final;
// Transform Stage
virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
@ -89,7 +230,7 @@ public:
// Resource Stage
virtual void do_setResourceBuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_setResourceTexture(const Batch& batch, size_t paramOffset) final;
virtual void do_setResourceTextureTable(const Batch& batch, size_t paramOffset);
virtual void do_setResourceTextureTable(const Batch& batch, size_t paramOffset) {}; // VKTODO: not needed currently, to be implemented in the future
virtual void do_setResourceFramebufferSwapChainTexture(const Batch& batch, size_t paramOffset) final;
// Pipeline Stage
@ -124,7 +265,7 @@ public:
// Performance profiling markers
virtual void do_pushProfileRange(const Batch& batch, size_t paramOffset) final;
virtual void do_popProfileRange(const Batch& batch, size_t paramOffset) final;
#endif
protected:
// Logical device, application's view of the physical device (GPU)
// VkPipeline cache object
@ -135,6 +276,12 @@ protected:
VkQueue _transferQueue; //TODO: initialize from device
friend class VKBuffer;
VkCommandBuffer _currentCommandBuffer;
size_t _commandIndex{ 0 };
int _currentDraw{ -1 };
bool _inRenderTransferPass{ false };
typedef void (VKBackend::*CommandCall)(const Batch&, size_t);
static std::array<VKBackend::CommandCall, Batch::NUM_COMMANDS> _commandCalls;
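// Dispatch sketch (assumed, following the GL backends): renderPassDraw() walks the
// batch's command stream and jumps through this table, roughly:
//   CommandCall call = _commandCalls[(*command)];
//   (this->*call)(batch, *offset);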
static const size_t INVALID_OFFSET = (size_t)-1;
};
}} // namespace gpu::vulkan


@ -5,10 +5,152 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include "VKFramebuffer.h"
#include "VKBackend.h"
void gpu::vulkan::VKFramebuffer::update() {
// VKTODO
/*gl::GLTexture* gltexture = nullptr;
TexturePointer surface;
if (_gpuObject.getColorStamps() != _colorStamps) {
if (_gpuObject.hasColor()) {
_colorBuffers.clear();
static const GLenum colorAttachments[] = {
GL_COLOR_ATTACHMENT0,
GL_COLOR_ATTACHMENT1,
GL_COLOR_ATTACHMENT2,
GL_COLOR_ATTACHMENT3,
GL_COLOR_ATTACHMENT4,
GL_COLOR_ATTACHMENT5,
GL_COLOR_ATTACHMENT6,
GL_COLOR_ATTACHMENT7,
GL_COLOR_ATTACHMENT8,
GL_COLOR_ATTACHMENT9,
GL_COLOR_ATTACHMENT10,
GL_COLOR_ATTACHMENT11,
GL_COLOR_ATTACHMENT12,
GL_COLOR_ATTACHMENT13,
GL_COLOR_ATTACHMENT14,
GL_COLOR_ATTACHMENT15 };
int unit = 0;
auto backend = _backend.lock();
for (auto& b : _gpuObject.getRenderBuffers()) {
surface = b._texture;
if (surface) {
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
} else {
gltexture = nullptr;
}
if (gltexture) {
if (gltexture->_target == GL_TEXTURE_2D) {
glNamedFramebufferTexture(_id, colorAttachments[unit], gltexture->_texture, 0);
} else if (gltexture->_target == GL_TEXTURE_2D_MULTISAMPLE) {
glNamedFramebufferTexture(_id, colorAttachments[unit], gltexture->_texture, 0);
} else {
glNamedFramebufferTextureLayer(_id, colorAttachments[unit], gltexture->_texture, 0, b._subresource);
}
_colorBuffers.push_back(colorAttachments[unit]);
} else {
glNamedFramebufferTexture(_id, colorAttachments[unit], 0, 0);
}
unit++;
}
}
_colorStamps = _gpuObject.getColorStamps();
}
GLenum attachement = GL_DEPTH_STENCIL_ATTACHMENT;
if (!_gpuObject.hasStencil()) {
attachement = GL_DEPTH_ATTACHMENT;
} else if (!_gpuObject.hasDepth()) {
attachement = GL_STENCIL_ATTACHMENT;
}
if (_gpuObject.getDepthStamp() != _depthStamp) {
auto surface = _gpuObject.getDepthStencilBuffer();
auto backend = _backend.lock();
if (_gpuObject.hasDepthStencil() && surface) {
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
gltexture = backend->syncGPUObject(surface);
}
if (gltexture) {
if (gltexture->_target == GL_TEXTURE_2D) {
glNamedFramebufferTexture(_id, attachement, gltexture->_texture, 0);
}
else if (gltexture->_target == GL_TEXTURE_2D_MULTISAMPLE) {
glNamedFramebufferTexture(_id, attachement, gltexture->_texture, 0);
} else {
glNamedFramebufferTextureLayer(_id, attachement, gltexture->_texture, 0,
_gpuObject.getDepthStencilBufferSubresource());
}
} else {
glNamedFramebufferTexture(_id, attachement, 0, 0);
}
_depthStamp = _gpuObject.getDepthStamp();
}
// Last but not least, define where we draw
if (!_colorBuffers.empty()) {
glNamedFramebufferDrawBuffers(_id, (GLsizei)_colorBuffers.size(), _colorBuffers.data());
} else {
glNamedFramebufferDrawBuffer(_id, GL_NONE);
}
// Now check for completeness
_status = glCheckNamedFramebufferStatus(_id, GL_DRAW_FRAMEBUFFER);
// restore the current framebuffer
checkStatus();*/
}
bool gpu::vulkan::VKFramebuffer::checkStatus(gpu::vulkan::VKFramebuffer::FramebufferStatus target) const {
// VKTODO
/*switch (_status) {
case GL_FRAMEBUFFER_COMPLETE:
// Success !
return true;
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
break;
case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
break;
case GL_FRAMEBUFFER_UNSUPPORTED:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
break;
#if !defined(USE_GLES)
case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
break;
case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
break;
#endif
default:
break;
}
return false;
*/
}
gpu::vulkan::VKFramebuffer::~VKFramebuffer() {
//VKTODO
/*if (_id) {
auto backend = _backend.lock();
if (backend) {
backend->releaseFramebuffer(_id);
}
}*/
}
#if 0
using namespace gpu;
using namespace gpu::gl;


@ -11,23 +11,22 @@
#include "VKShared.h"
#include "VKBackend.h"
namespace gpu { namespace vk {
namespace gpu { namespace vulkan {
class VKFramebuffer : public VKObject<Framebuffer> {
class VKFramebuffer : public vulkan::VKObject<Framebuffer> {
public:
template <typename VKFramebufferType>
static VKFramebufferType* sync(VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebufferType* object = Backend::getGPUObject<VKFramebufferType>(framebuffer);
static VKFramebuffer* sync(vulkan::VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebuffer* object = Backend::getGPUObject<VKFramebuffer>(framebuffer);
bool needsUpate { false };
bool needsUpdate{ false };
if (!object ||
framebuffer.getDepthStamp() != object->_depthStamp ||
framebuffer.getColorStamps() != object->_colorStamps) {
needsUpate = true;
needsUpdate = true;
}
// If GPU object already created and in sync
if (!needsUpate) {
if (!needsUpdate) {
return object;
} else if (framebuffer.isEmpty()) {
// NO framebuffer definition yet so let's avoid thinking
@ -37,18 +36,18 @@ public:
// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Framebuffer
object = new VKFramebufferType(backend.shared_from_this(), framebuffer);
object = new VKFramebuffer(backend.shared_from_this(), framebuffer);
Backend::setGPUObject(framebuffer, object);
(void)CHECK_VK_ERROR();
}
object->update();
return object;
}
// VKTODO: what type should it return?
template <typename VKFramebufferType>
static VKuint getId(VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebufferType* fbo = sync<VKFramebufferType>(backend, framebuffer);
static uint32_t getId(vulkan::VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebufferType* fbo = sync(backend, framebuffer);
if (fbo) {
return fbo->_id;
} else {
@ -56,17 +55,20 @@ public:
}
}
const VKuint& _fbo { _id };
std::vector<VKenum> _colorBuffers;
// VKTODO: probably a Vulkan handle instead of this
//const VKuint& _fbo { _id };
//std::vector<VKenum> _colorBuffers;
Stamp _depthStamp { 0 };
std::vector<Stamp> _colorStamps;
protected:
VKenum _status { VK_FRAMEBUFFER_COMPLETE };
virtual void update() = 0;
bool checkStatus(VKenum target) const;
enum FramebufferStatus { VK_FRAMEBUFFER_COMPLETE } _status;
virtual void update();
bool checkStatus(FramebufferStatus target) const;
VKFramebuffer(const std::weak_ptr<VKBackend>& backend, const Framebuffer& framebuffer, VKuint id) : VKObject(backend, framebuffer, id) {}
// VKTODO: We need a check on backend.lock(), or to pass backend reference instead
VKFramebuffer(const std::weak_ptr<vulkan::VKBackend>& backend, const Framebuffer& framebuffer) : VKObject(*backend.lock(), framebuffer) {}
// VKTODO: Do we need virtual destructor here?
~VKFramebuffer();
};


@ -11,20 +11,18 @@
#include "VKShared.h"
#include "VKBackend.h"
namespace gpu { namespace vk {
namespace gpu { namespace vulkan {
class VKQuery : public VKObject<Query> {
using Parent = gpu::vk::VKObject<Query>;
using Parent = gpu::vulkan::VKObject<Query>;
public:
template <typename VKQueryType>
static VKQueryType* sync(VKBackend& backend, const Query& query) {
VKQueryType* object = Backend::getGPUObject<VKQueryType>(query);
static VKQuery* sync(VKBackend& backend, const Query& query) {
VKQuery* object = Backend::getGPUObject<VKQuery>(query);
// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Query
object = new VKQueryType(backend.shared_from_this(), query);
(void)CHECK_VK_ERROR();
object = new VKQuery(backend.shared_from_this(), query);
Backend::setGPUObject(query, object);
}
@ -32,30 +30,36 @@ public:
}
template <typename VKQueryType>
static VKuint getId(VKBackend& backend, const QueryPointer& query) {
static uint32_t getId(VKBackend& backend, const QueryPointer& query) {
// VKTODO Vulkan handle is used instead
return 0;
if (!query) {
return 0;
}
VKQuery* object = sync<VKQueryType>(backend, *query);
/*VKQuery* object = sync<VKQueryType>(backend, *query);
if (!object) {
return 0;
}
return object->_endqo;
return object->_endqo;*/
}
const VKuint& _endqo = { _id };
const VKuint _beginqo = { 0 };
VKuint64 _result { (VKuint64)-1 };
// VKTODO Vulkan handle is used instead
/*const uint32_t& _endqo = { _id };
const uint32_t _beginqo = { 0 };
uint64_t _result { (uint64_t)-1 };*/
protected:
VKQuery(const std::weak_ptr<VKBackend>& backend, const Query& query, VKuint endId, VKuint beginId) : Parent(backend, query, endId), _beginqo(beginId) {}
// VKTODO: We need a check on backend.lock(), or to pass backend reference instead
VKQuery(const std::weak_ptr<VKBackend>& backend, const Query& query) : Parent(*backend.lock(), query) {}
~VKQuery() {
if (_id) {
VKuint ids[2] = { _endqo, _beginqo };
glDeleteQueries(2, ids);
}
// Vulkan handle is used instead
/*if (_id) {
// VKTODO
uint32_t ids[2] = { _endqo, _beginqo };
//glDeleteQueries(2, ids);
}*/
}
};


@ -12,6 +12,7 @@
#include <gpu/Forward.h>
#include <gpu/Format.h>
#include <gpu/Context.h>
#include "VKBackend.h"
Q_DECLARE_LOGGING_CATEGORY(gpu_vk_logging)
Q_DECLARE_LOGGING_CATEGORY(trace_gpu_vk)
@ -81,26 +82,24 @@ static const VkPrimitiveTopology PRIMITIVE_TO_VK[gpu::NUM_PRIMITIVES] = {
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
};
//static const VKenum ELEMENT_TYPE_TO_VK[gpu::NUM_TYPES] = {
// VK_FLOAT,
// VK_INT,
// VK_UNSIGNED_INT,
// VK_HALF_FLOAT,
// VK_SHORT,
// VK_UNSIGNED_SHORT,
// VK_BYTE,
// VK_UNSIGNED_BYTE,
// // Normalized values
// VK_INT,
// VK_UNSIGNED_INT,
// VK_SHORT,
// VK_UNSIGNED_SHORT,
// VK_BYTE,
// VK_UNSIGNED_BYTE
//};
bool checkGLError(const char* name = nullptr);
bool checkGLErrorDebug(const char* name = nullptr);
// VKTODO is it needed?
/*static const enum ELEMENT_TYPE_TO_VK[gpu::NUM_TYPES] = {
VK_FLOAT,
VK_INT,
VK_UNSIGNED_INT,
VK_HALF_FLOAT,
VK_SHORT,
VK_UNSIGNED_SHORT,
VK_BYTE,
VK_UNSIGNED_BYTE,
// Normalized values
VK_INT,
VK_UNSIGNED_INT,
VK_SHORT,
VK_UNSIGNED_SHORT,
VK_BYTE,
VK_UNSIGNED_BYTE
};*/
class VKBackend;
@ -116,9 +115,7 @@ protected:
const std::weak_ptr<VKBackend> _backend;
};
} } // namespace gpu::gl
#define CHECK_VK_ERROR() gpu::vk::checkGLErrorDebug(__FUNCTION__)
} } // namespace gpu::vulkan
#endif


@ -1,6 +1,7 @@
#include "Allocation.h"
#include <mutex>
#include <stdexcept>
using namespace vks;


@ -30,8 +30,7 @@
#define VK_USE_PLATFORM_XCB_KHR
#endif
#define VKCPP_ENHANCED_MODE
#include <vulkan/vulkan.hpp>
#include <vulkan/vulkan.h>
#define VULKAN_USE_VMA 1


@ -244,7 +244,7 @@ void Context::trashCommandBuffers(const std::vector<VkCommandBuffer>& cmdBuffers
trashAll<VkCommandBuffer>(cmdBuffers, destructor);
}
void Context::emptyDumpster(vk::Fence fence) {
void Context::emptyDumpster(VkFence fence) {
VoidLambdaList newDumpster;
newDumpster.swap(dumpster);
recycler.push(FencedLambda{ fence, [fence, newDumpster, this] {
@ -577,7 +577,10 @@ Buffer Context::stageToDeviceBuffer(const VkBufferUsageFlags& usage, size_t size
Buffer staging = createStagingBuffer(size, data);
Buffer result = createDeviceBuffer(usage | VK_BUFFER_USAGE_TRANSFER_DST_BIT, size);
withPrimaryCommandBuffer(
[&](vk::CommandBuffer copyCmd) { copyCmd.copyBuffer(staging.buffer, result.buffer, vk::BufferCopy(0, 0, size)); });
[&](VkCommandBuffer copyCmd) {
VkBufferCopy bufferCopy{ 0, 0, size };
vkCmdCopyBuffer(copyCmd, staging.buffer, result.buffer, 1, &bufferCopy);
});
staging.destroy();
return result;
}


@ -107,11 +107,11 @@ public:
return getDeviceExtensionNames(physicalDevice).count(extension) != 0;
}
void requireExtensions(const vk::ArrayProxy<const std::string>& requestedExtensions) {
void requireExtensions(const std::set<std::string>& requestedExtensions) {
requiredExtensions.insert(requestedExtensions.begin(), requestedExtensions.end());
}
void requireDeviceExtensions(const vk::ArrayProxy<const std::string>& requestedExtensions) {
void requireDeviceExtensions(const std::set<std::string>& requestedExtensions) {
requiredDeviceExtensions.insert(requestedExtensions.begin(), requestedExtensions.end());
}
@ -161,7 +161,7 @@ public:
// Should be called from time to time by the application to migrate zombie resources
// to the recycler along with a fence that will be signalled when the objects are
// safe to delete.
void emptyDumpster(vk::Fence fence);
void emptyDumpster(VkFence fence);
// Check the recycler fences for signalled status. Any that are signalled will have their corresponding
// lambdas executed, freeing up the associated resources
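// Hypothetical per-frame usage (assumed names, including the recycle() entry point):
//   context.emptyDumpster(frameFence);  // park zombies behind this frame's fence
//   context.recycle();                  // free anything whose fence has signalled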
@ -239,16 +239,16 @@ public:
// This function is intended for initialization only. It incurs a queue and device
// flush and may impact performance if used in non-setup code
void withPrimaryCommandBuffer(const std::function<void(const VkCommandBuffer& commandBuffer)>& f) const {
vk::CommandBuffer commandBuffer = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY);
VkCommandBuffer commandBuffer = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY);
VkCommandBufferBeginInfo vkCommandBufferBeginInfo {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr
};
commandBuffer.begin(vkCommandBufferBeginInfo);
vkBeginCommandBuffer(commandBuffer, &vkCommandBufferBeginInfo);
f(commandBuffer);
commandBuffer.end();
vkEndCommandBuffer(commandBuffer);
device->flushCommandBuffer(commandBuffer, queue, true);
}
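// Hypothetical init-time usage (assumed call site; compare stageToDeviceBuffer above):
//   context.withPrimaryCommandBuffer([&](const VkCommandBuffer& cmd) {
//       VkMemoryBarrier barrier{ VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr,
//                                VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT };
//       vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
//                            1, &barrier, 0, nullptr, 0, nullptr);
//   });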