Vulkan library

This commit is contained in:
Brad Davis 2018-10-28 19:27:06 -07:00 committed by Lubosz Sarnecki
parent 03b96a679e
commit 91aac0a481
26 changed files with 3316 additions and 0 deletions

View file

@ -0,0 +1,16 @@
# Find-module for the Vulkan headers and loader library.
# Honors the VULKAN_SDK / VK_SDK_PATH environment variables set by the LunarG SDK installer.
if (WIN32)
# Windows SDK layouts differ between 32- and 64-bit toolchains.
find_path(VULKAN_INCLUDE_DIR NAMES vulkan/vulkan.h HINTS "$ENV{VULKAN_SDK}/Include" "$ENV{VK_SDK_PATH}/Include")
if (CMAKE_CL_64)
find_library(VULKAN_LIBRARY NAMES vulkan-1 HINTS "$ENV{VULKAN_SDK}/Lib" "$ENV{VK_SDK_PATH}/Bin")
else()
find_library(VULKAN_LIBRARY NAMES vulkan-1 HINTS "$ENV{VULKAN_SDK}/Lib32" "$ENV{VK_SDK_PATH}/Bin32")
endif()
else()
# Linux/other: the loader is named libvulkan and lives under the SDK's lib dir.
find_path(VULKAN_INCLUDE_DIR NAMES vulkan/vulkan.h HINTS "$ENV{VULKAN_SDK}/include")
find_library(VULKAN_LIBRARY NAMES vulkan HINTS
"$ENV{VULKAN_SDK}/lib")
endif()
# Standard handling: sets Vulkan_FOUND when both the library and include dir were located.
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Vulkan DEFAULT_MSG VULKAN_LIBRARY VULKAN_INCLUDE_DIR)
mark_as_advanced(VULKAN_INCLUDE_DIR VULKAN_LIBRARY)

View file

@ -25,4 +25,8 @@ namespace hifi { namespace properties {
const char* PRIMARY_CONTEXT = "com.highfidelity.gl.primaryContext";
}
namespace vk {
const char* CONTEXT = "com.highfidelity.vk.context";
}
} }

View file

@ -28,6 +28,10 @@ namespace hifi { namespace properties {
extern const char* PRIMARY_CONTEXT;
}
namespace vk {
extern const char* CONTEXT;
}
} }

View file

@ -0,0 +1,6 @@
# Build configuration for the "vk" (Vulkan helper) library.
set(TARGET_NAME vk)
# Project-local macro: declares the library target; Gui pulls in the Qt Gui module.
setup_hifi_library(Gui)
# Mirror the src/ directory layout in IDE source groups.
GroupSources(src)
link_hifi_libraries(shared gl)
# Project-local macro: links the Vulkan SDK (see the FindVulkan module).
target_vulkan()

View file

@ -0,0 +1,94 @@
#include "Allocation.h"

#include <mutex>

using namespace vks;

#if VULKAN_USE_VMA

// Accessor for the process-wide VMA allocator.
// Only valid after initAllocator() has run.
VmaAllocator& Allocation::getAllocator() {
    static VmaAllocator allocator;
    return allocator;
}

// One-time creation of the global VMA allocator for the given device.
// Safe to call multiple times; only the first call has an effect.
void Allocation::initAllocator(const vk::PhysicalDevice& physicalDevice, const vk::Device& device) {
    static std::once_flag once;
    std::call_once(once, [&] {
        VmaAllocatorCreateInfo allocatorInfo = {};
        allocatorInfo.physicalDevice = physicalDevice;
        allocatorInfo.device = device;
        VmaAllocator& allocator = getAllocator();
        vmaCreateAllocator(&allocatorInfo, &allocator);
    });
}

// Make host writes visible to the device (needed for non-host-coherent memory).
void Allocation::flush(vk::DeviceSize size, vk::DeviceSize offset) {
    vmaFlushAllocation(getAllocator(), allocation, offset, size);
}

// Make device writes visible to the host (needed for non-host-coherent memory).
void Allocation::invalidate(vk::DeviceSize size, vk::DeviceSize offset) {
    vmaInvalidateAllocation(getAllocator(), allocation, offset, size);
}

// Map the whole allocation and return the host pointer.  The mapping is cached;
// repeated calls return the same pointer.  Partial maps are not supported by
// this VMA-backed path.
void* Allocation::rawmap(size_t offset, vk::DeviceSize size) {
    if (offset != 0 || size != VK_WHOLE_SIZE) {
        throw std::runtime_error("Unsupported");
    }
    if (!mapped) {
        vmaMapMemory(getAllocator(), allocation, &mapped);
    }
    return mapped;
}

void Allocation::unmap() {
    if (mapped) {
        vmaUnmapMemory(getAllocator(), allocation);
        mapped = nullptr;
    }
}

// NOTE(review): only unmaps; the VMA allocation itself is presumably released by
// the derived class (vmaDestroyBuffer / vmaDestroyImage) -- confirm at call sites.
void Allocation::destroy() {
    unmap();
}

#else

// Fixes vs. the original non-VMA branch (which did not compile):
//  - class name was misspelled "Alloction" throughout;
//  - default arguments were repeated on out-of-line definitions (ill-formed);
//  - rawmap had a defaulted parameter before a non-defaulted one;
//  - destroy() had a stray ';' between signature and body;
//  - rawmap returned "(T*)mapped" with no T in scope.

void Allocation::initAllocator(const vk::PhysicalDevice&, const vk::Device&) {
}

// Make host writes visible to the device (needed for non-host-coherent memory).
void Allocation::flush(vk::DeviceSize size, vk::DeviceSize offset) {
    device.flushMappedMemoryRanges(vk::MappedMemoryRange{ memory, offset, size });
}

// Make device writes visible to the host (needed for non-host-coherent memory).
void Allocation::invalidate(vk::DeviceSize size, vk::DeviceSize offset) {
    device.invalidateMappedMemoryRanges(vk::MappedMemoryRange{ memory, offset, size });
}

// Map [offset, offset+size) of the backing memory and return the host pointer.
void* Allocation::rawmap(size_t offset, VkDeviceSize size) {
    mapped = device.mapMemory(memory, offset, size, vk::MemoryMapFlags());
    return mapped;
}

void Allocation::unmap() {
    device.unmapMemory(memory);
}

// Release the mapping (if any) and free the device memory.
void Allocation::destroy() {
    if (mapped) {
        unmap();
    }
    if (memory) {
        device.freeMemory(memory);
        memory = nullptr;
    }
}

#endif

View file

@ -0,0 +1,60 @@
#pragma once

#include "Config.h"
#include "Device.h"

namespace vks {

// A wrapper class for an allocation, either an Image or Buffer.  Not intended to be used directly
// but only as a base class providing common functionality for the classes below.
//
// Provides easy to use mechanisms for mapping, unmapping and copying host data to the device memory
struct Allocation {
    // destroy() is virtual, so deleting a derived object through an Allocation*
    // must also be safe: give the base a virtual destructor.
    virtual ~Allocation() = default;

    // One-time setup of the global allocator (no-op when VMA is disabled).
    static void initAllocator(const vk::PhysicalDevice&, const vk::Device&);

    // Map the memory range and return a host-visible pointer; unmap() releases it.
    void* rawmap(size_t offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
    void unmap();

    // Typed convenience wrapper around rawmap().
    template <typename T = void>
    inline T* map(size_t offset = 0, VkDeviceSize size = VK_WHOLE_SIZE) {
        return (T*)rawmap(offset, size);
    }

    // Copy raw host data into the currently mapped pointer at `offset`.
    // Precondition: map()/rawmap() has been called.
    inline void copy(size_t size, const void* data, VkDeviceSize offset = 0) const {
        memcpy((uint8_t*)mapped + offset, data, size);
    }

    template <typename T>
    inline void copy(const T& data, VkDeviceSize offset = 0) const {
        copy(sizeof(T), &data, offset);
    }

    template <typename T>
    inline void copy(const std::vector<T>& data, VkDeviceSize offset = 0) const {
        copy(sizeof(T) * data.size(), data.data(), offset);
    }

    // Flush/invalidate a mapped range (required for non-host-coherent memory).
    void flush(vk::DeviceSize size, vk::DeviceSize offset = 0);
    void invalidate(vk::DeviceSize size, vk::DeviceSize offset = 0);

    virtual void destroy();

    vks::Device device;
    vk::DeviceSize size{ 0 };
    vk::DeviceSize alignment{ 0 };
    vk::DeviceSize allocSize{ 0 };
#if VULKAN_USE_VMA
    // Fix: removed the ill-formed `Allocation::` qualification on this in-class declaration.
    static VmaAllocator& getAllocator();
    VmaAllocation allocation{ nullptr };
    /** @brief Memory property flags to be filled by external source at buffer creation (to query at some later point) */
    vk::MemoryPropertyFlags memoryPropertyFlags;
#else
    vk::DeviceMemory memory;
#endif
    void* mapped{ nullptr };
};

}  // namespace vks

View file

@ -0,0 +1,51 @@
#pragma once

#include "Allocation.h"

namespace vks {

/**
 * @brief Encapsulates access to a Vulkan buffer backed up by device memory
 * @note To be filled by an external source like the VulkanDevice
 */
struct Buffer : public Allocation {
private:
    using Parent = Allocation;

public:
    vk::Buffer buffer;
    /** @brief Usage flags to be filled by external source at buffer creation (to query at some later point) */
    vk::BufferUsageFlags usageFlags;
    /** @brief Default descriptor covering this buffer; populated by setupDescriptor() or the creating context */
    vk::DescriptorBufferInfo descriptor;

    // True when the underlying vk::Buffer handle is valid.
    operator bool() const { return buffer.operator bool(); }

    /**
     * Setup the default descriptor for this buffer
     *
     * @param size (Optional) Size of the memory range of the descriptor
     * @param offset (Optional) Byte offset from beginning
     */
    void setupDescriptor(vk::DeviceSize size, vk::DeviceSize offset = 0) {
        descriptor.offset = offset;
        descriptor.buffer = buffer;
        descriptor.range = size;
    }

    /**
     * Release all Vulkan resources held by this buffer
     */
    // Fix: marked `override` -- Allocation::destroy() is virtual and this must be
    // reached when destroying through a base pointer.
    // NOTE(review): in the VMA build the VmaAllocation itself is never freed here
    // (base destroy() only unmaps) -- looks like a leak; confirm ownership model.
    void destroy() override {
        if (buffer) {
            device.destroy(buffer);
            buffer = vk::Buffer{};
        }
        Parent::destroy();
    }
};

}  // namespace vks

View file

@ -0,0 +1,39 @@
//
//  Created by Bradley Austin Davis on 2016/03/19
//  Copyright 2013 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
// Central configuration header for the vks library: pulls in the standard
// library, GLM and vulkan.hpp, and selects the platform WSI extension.
#pragma once
#include <algorithm>
#include <functional>
#include <list>
#include <queue>
#include <set>
#include <string>
#include <vector>
#include <glm/glm.hpp>
#include <QtGlobal>
// Choose the window-system-integration surface platform BEFORE including vulkan.hpp,
// since the header only exposes the surface-creation entry points for the selected platform.
#if defined(Q_OS_WIN)
#define VK_USE_PLATFORM_WIN32_KHR
#elif defined(Q_OS_ANDROID)
#define VK_USE_PLATFORM_ANDROID_KHR
#elif defined(Q_OS_DARWIN)
// NOTE(review): no surface platform is defined for Darwin -- presumably unsupported
// here (MoltenVK would need VK_USE_PLATFORM_MACOS_MVK); confirm intent.
#else
#define VK_USE_PLATFORM_XLIB_KHR
#endif
#define VKCPP_ENHANCED_MODE
#include <vulkan/vulkan.hpp>
// When enabled, memory is managed through the Vulkan Memory Allocator (VMA)
// rather than raw vkAllocateMemory calls; see Allocation.h/.cpp.
#define VULKAN_USE_VMA 1
#if VULKAN_USE_VMA
#include <vma/vk_mem_alloc.h>
#endif

View file

@ -0,0 +1,7 @@
#include "Context.h"
#include <QtCore/QCoreApplication>
// Meyers-singleton accessor: the one Context instance shared by the whole
// application, constructed on first use (Context's constructor is private).
vks::Context& vks::Context::get() {
static Context INSTANCE;
return INSTANCE;
}

View file

@ -0,0 +1,815 @@
#pragma once
#include "Config.h"
#include "Debug.h"
#include "Image.h"
#include "Buffer.h"
#include "Helpers.h"
#include "Device.h"
#include <unordered_set>
#include <QtCore/QDebug>
namespace vks {
using StringList = std::list<std::string>;
using CStringVector = std::vector<const char*>;
using DevicePickerFunction = std::function<vk::PhysicalDevice(const std::vector<vk::PhysicalDevice>&)>;
using DeviceExtensionsPickerFunction = std::function<std::set<std::string>(const vk::PhysicalDevice&)>;
using InstanceExtensionsPickerFunction = std::function<std::set<std::string>()>;
using InstanceExtensionsPickerFunctions = std::list<InstanceExtensionsPickerFunction>;
using LayerVector = std::vector<const char*>;
using MipData = ::std::pair<vk::Extent3D, vk::DeviceSize>;
namespace queues {
struct DeviceCreateInfo : public vk::DeviceCreateInfo {
std::vector<vk::DeviceQueueCreateInfo> deviceQueues;
std::vector<std::vector<float>> deviceQueuesPriorities;
void addQueueFamily(uint32_t queueFamilyIndex, vk::ArrayProxy<float> priorities) {
deviceQueues.push_back({ {}, queueFamilyIndex });
std::vector<float> prioritiesVector;
prioritiesVector.resize(priorities.size());
memcpy(prioritiesVector.data(), priorities.data(), sizeof(float) * priorities.size());
deviceQueuesPriorities.push_back(prioritiesVector);
}
void addQueueFamily(uint32_t queueFamilyIndex, size_t count = 1) {
std::vector<float> priorities;
priorities.resize(count);
std::fill(priorities.begin(), priorities.end(), 0.0f);
addQueueFamily(queueFamilyIndex, priorities);
}
void update() {
assert(deviceQueuesPriorities.size() == deviceQueues.size());
auto size = deviceQueues.size();
for (auto i = 0; i < size; ++i) {
auto& deviceQueue = deviceQueues[i];
auto& deviceQueuePriorities = deviceQueuesPriorities[i];
deviceQueue.queueCount = (uint32_t)deviceQueuePriorities.size();
deviceQueue.pQueuePriorities = deviceQueuePriorities.data();
}
this->queueCreateInfoCount = (uint32_t)deviceQueues.size();
this->pQueueCreateInfos = deviceQueues.data();
}
};
} // namespace queues
///////////////////////////////////////////////////////////////////////
//
// Object destruction support
//
// It's often critical to avoid destroying an object that may be in use by the GPU. In order to service this need
// the context class contains structures for objects that are pending deletion.
//
// The first container is the dumpster, and it just contains a set of lambda objects that when executed, destroy
// resources (presumably... in theory the lambda can do anything you want, but the purpose is to contain GPU object
// destruction calls).
//
// When the application makes use of a function that uses a fence, it can provide that fence to the context as a marker
// for destroying all the pending objects. Anything in the dumpster is migrated to the recycler.
//
// Finally, an application can call the recycle function at regular intervals (perhaps once per frame, perhaps less often)
// in order to check the fences and execute the associated destructors for any that are signalled.
using VoidLambda = std::function<void()>;
using VoidLambdaList = std::list<VoidLambda>;
using FencedLambda = std::pair<vk::Fence, VoidLambda>;
using FencedLambdaQueue = std::queue<FencedLambda>;
struct Context {
private:
// Borrow the c_str() pointers from a list of strings, in the form the Vulkan
// C structures expect.  `values` must outlive the returned vector.
static CStringVector toCStrings(const StringList& values) {
    CStringVector pointers;
    pointers.reserve(values.size());
    std::transform(values.begin(), values.end(), std::back_inserter(pointers),
                   [](const std::string& value) { return value.c_str(); });
    return pointers;
}
// Overload for contiguous string ranges (vector / initializer list).
static CStringVector toCStrings(const vk::ArrayProxy<const std::string>& values) {
    CStringVector pointers;
    pointers.reserve(values.size());
    for (const auto& value : values) {
        pointers.push_back(value.c_str());
    }
    return pointers;
}
// Keep only the requested layers that are actually installed on this system.
static CStringVector filterLayers(const StringList& desiredLayers) {
    static std::set<std::string> validLayerNames = getAvailableLayers();
    CStringVector present;
    for (const auto& layerName : desiredLayers) {
        if (validLayerNames.count(layerName) != 0) {
            present.push_back(layerName.c_str());
        }
    }
    return present;
}
// Private: instances are only created through get().
Context() {};
public:
// Singleton accessor (defined in Context.cpp).
static Context& get();
// Create application wide Vulkan instance
// Enumerate the instance layers installed on this system.
static std::set<std::string> getAvailableLayers() {
std::set<std::string> result;
auto layers = vk::enumerateInstanceLayerProperties();
for (auto layer : layers) {
result.insert(layer.layerName);
}
return result;
}
// Enumerate all instance-level extensions supported by the loader/driver.
static std::vector<vk::ExtensionProperties> getExtensions() { return vk::enumerateInstanceExtensionProperties(); }
static std::set<std::string> getExtensionNames() {
std::set<std::string> extensionNames;
for (auto& ext : getExtensions()) {
extensionNames.insert(ext.extensionName);
}
return extensionNames;
}
// Note: re-enumerates extensions on every call; fine for setup-time use only.
static bool isExtensionPresent(const std::string& extensionName) { return getExtensionNames().count(extensionName) != 0; }
// Device-level counterparts of the instance extension queries above.
static std::vector<vk::ExtensionProperties> getDeviceExtensions(const vk::PhysicalDevice& physicalDevice) {
return physicalDevice.enumerateDeviceExtensionProperties();
}
static std::set<std::string> getDeviceExtensionNames(const vk::PhysicalDevice& physicalDevice) {
std::set<std::string> extensionNames;
for (auto& ext : getDeviceExtensions(physicalDevice)) {
extensionNames.insert(ext.extensionName);
}
return extensionNames;
}
static bool isDeviceExtensionPresent(const vk::PhysicalDevice& physicalDevice, const std::string& extension) {
return getDeviceExtensionNames(physicalDevice).count(extension) != 0;
}
// Record instance extensions that createInstance() must enable.
void requireExtensions(const vk::ArrayProxy<const std::string>& requestedExtensions) {
requiredExtensions.insert(requestedExtensions.begin(), requestedExtensions.end());
}
// Record device extensions that buildDevice() must enable.
void requireDeviceExtensions(const vk::ArrayProxy<const std::string>& requestedExtensions) {
requiredDeviceExtensions.insert(requestedExtensions.begin(), requestedExtensions.end());
}
// Register a callback that contributes additional instance extensions
// (e.g. the window system's surface extension) at createInstance() time.
void addInstanceExtensionPicker(const InstanceExtensionsPickerFunction& function) {
instanceExtensionsPickers.push_back(function);
}
// Override how the physical device is chosen from the enumerated list.
void setDevicePicker(const DevicePickerFunction& picker) { devicePicker = picker; }
void setDeviceExtensionsPicker(const DeviceExtensionsPickerFunction& picker) { deviceExtensionsPicker = picker; }
// Must be called before createInstance(); validation layers are baked into the instance.
void setValidationEnabled(bool enable) {
if (instance) {
throw std::runtime_error("Cannot change validations state after instance creation");
}
enableValidation = enable;
}
// Create the application-wide vk::Instance, enabling the required/picked
// extensions and (optionally) the validation layers and debug callbacks.
void createInstance() {
if (instance) {
throw std::runtime_error("Instance already exists");
}
// NOTE(review): this force-enables validation whenever VK_EXT_debug_utils
// exists, overriding setValidationEnabled(false) -- confirm that is intended.
if (isExtensionPresent(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) {
requireExtensions({ VK_EXT_DEBUG_UTILS_EXTENSION_NAME });
enableValidation = true;
enableDebugMarkers = true;
qDebug() << "Found debug marker extension";
}
// Vulkan instance
vk::ApplicationInfo appInfo;
appInfo.pApplicationName = "VulkanExamples";
appInfo.pEngineName = "VulkanExamples";
appInfo.apiVersion = VK_API_VERSION_1_0;
// Union of explicitly required extensions and everything the registered pickers ask for.
std::set<std::string> instanceExtensions;
instanceExtensions.insert(requiredExtensions.begin(), requiredExtensions.end());
for (const auto& picker : instanceExtensionsPickers) {
auto extensions = picker();
instanceExtensions.insert(extensions.begin(), extensions.end());
}
// instanceExtensions (std::set) owns the strings; this vector just borrows c_str pointers.
std::vector<const char*> enabledExtensions;
for (const auto& extension : instanceExtensions) {
enabledExtensions.push_back(extension.c_str());
}
// Enable surface extensions depending on os
vk::InstanceCreateInfo instanceCreateInfo;
instanceCreateInfo.pApplicationInfo = &appInfo;
if (enabledExtensions.size() > 0) {
instanceCreateInfo.enabledExtensionCount = (uint32_t)enabledExtensions.size();
instanceCreateInfo.ppEnabledExtensionNames = enabledExtensions.data();
}
// Only request validation layers that are actually installed (filterLayers).
CStringVector layers;
if (enableValidation) {
layers = filterLayers(debug::getDefaultValidationLayers());
instanceCreateInfo.enabledLayerCount = (uint32_t)layers.size();
instanceCreateInfo.ppEnabledLayerNames = layers.data();
}
instance = vk::createInstance(instanceCreateInfo);
// Install the debug report / marker hooks now that the instance exists.
if (enableValidation) {
debug::setupDebugging(instance);
}
if (enableDebugMarkers) {
debug::marker::setup(instance);
}
}
// Tear down the device and instance.  Drains both the dumpster (not-yet-fenced
// destructors) and the recycler (fenced destructors) before destroying anything.
void destroyContext() {
// After these waits the GPU is idle, so all recycler fences should be signalled.
queue.waitIdle();
device.waitIdle();
for (const auto& trash : dumpster) {
trash();
}
// NOTE(review): recycle() stops at the first unsignalled fence, so this loop
// would spin forever if a fence never signals; the waitIdle() calls above are
// what guarantee termination here.
while (!recycler.empty()) {
recycle();
}
destroyCommandPool();
device.destroy();
if (enableValidation) {
debug::cleanupDebugging(instance);
}
instance.destroy();
}
// Find the queue family best matching desiredFlags: an exact match wins,
// otherwise the family with the fewest extra capability bits.  Returns
// VK_QUEUE_FAMILY_IGNORED when no family has the desired flags.
// NOTE(review): presentSurface is accepted but never consulted -- the chosen
// family is not checked for presentation support; confirm/complete.
uint32_t findQueue(const vk::QueueFlags& desiredFlags, const vk::SurfaceKHR& presentSurface = vk::SurfaceKHR()) const {
uint32_t bestMatch{ VK_QUEUE_FAMILY_IGNORED };
VkQueueFlags bestMatchExtraFlags{ VK_QUEUE_FLAG_BITS_MAX_ENUM };
size_t queueCount = queueFamilyProperties.size();
for (uint32_t i = 0; i < queueCount; ++i) {
auto currentFlags = queueFamilyProperties[i].queueFlags;
// Doesn't contain the required flags, skip it
if (!(currentFlags & desiredFlags)) {
continue;
}
// Bits this family has beyond what was asked for; fewer is better.
VkQueueFlags currentExtraFlags = (currentFlags & ~desiredFlags).operator VkQueueFlags();
// If we find an exact match, return immediately
if (0 == currentExtraFlags) {
return i;
}
if (bestMatch == VK_QUEUE_FAMILY_IGNORED || currentExtraFlags < bestMatchExtraFlags) {
bestMatch = i;
bestMatchExtraFlags = currentExtraFlags;
}
}
return bestMatch;
}
// Defer destruction of a single object: the destructor lambda is queued in the
// dumpster and runs when emptyDumpster()'s fence later signals (see recycle()).
// `value` is captured by copy, so the caller may forget its handle immediately.
template <typename T>
void trash(T value, std::function<void(T t)> destructor = [](T t) { t.destroy(); }) const {
if (!value) {
return;
}
dumpster.push_back([=] { destructor(value); });
}
// Bulk variant: defer destruction of a whole vector of objects with one lambda.
template <typename T>
void trashAll(const std::vector<T>& values, std::function<void(const std::vector<T>&)> destructor) const {
if (values.empty()) {
return;
}
dumpster.push_back([=] { destructor(values); });
}
//
// Convenience functions for trashing specific types. These functions know what kind of function
// call to make for destroying a given Vulkan object.
//
void trashPipeline(vk::Pipeline& pipeline) const {
trash<vk::Pipeline>(pipeline, [this](vk::Pipeline pipeline) { device.destroyPipeline(pipeline); });
}
// Defers freeing command buffers back to their pool (default: the shared pool).
void trashCommandBuffers(const std::vector<vk::CommandBuffer>& cmdBuffers, vk::CommandPool commandPool = nullptr) const {
if (!commandPool) {
commandPool = getCommandPool();
}
using DtorLambda = std::function<void(const std::vector<vk::CommandBuffer>&)>;
DtorLambda destructor =
[=](const std::vector<vk::CommandBuffer>& cmdBuffers) {
device.freeCommandBuffers(commandPool, cmdBuffers);
};
trashAll<vk::CommandBuffer>(cmdBuffers, destructor);
}
// Should be called from time to time by the application to migrate zombie resources
// to the recycler along with a fence that will be signalled when the objects are
// safe to delete.
void emptyDumpster(vk::Fence fence) {
// Move the current dumpster contents into one fenced lambda; the dumpster
// is left empty and ready to accept new trash.
VoidLambdaList newDumpster;
newDumpster.swap(dumpster);
recycler.push(FencedLambda{ fence, [fence, newDumpster, this] {
for (const auto& f : newDumpster) {
f();
}
} });
}
// Check the recycler fences for signalled status. Any that are signalled will have their corresponding
// lambdas executed, freeing up the associated resources
// Fences are checked in FIFO order and the scan stops at the first unsignalled
// one; the fence itself is destroyed after its lambda runs.
void recycle() {
while (!recycler.empty()) {
const auto& trashItem = recycler.front();
const auto& fence = trashItem.first;
auto fenceStatus = device.getFenceStatus(fence);
if (vk::Result::eSuccess != fenceStatus) {
break;
}
const VoidLambda& lambda = trashItem.second;
lambda();
device.destroyFence(fence);
recycler.pop();
}
}
// Create an image memory barrier for changing the layout of
// an image and put it into an active command buffer
// See chapter 11.4 "vk::Image Layout" for details
void setImageLayout(vk::CommandBuffer cmdbuffer,
vk::Image image,
vk::ImageLayout oldImageLayout,
vk::ImageLayout newImageLayout,
vk::ImageSubresourceRange subresourceRange) const {
// Create an image barrier object
vk::ImageMemoryBarrier imageMemoryBarrier;
imageMemoryBarrier.oldLayout = oldImageLayout;
imageMemoryBarrier.newLayout = newImageLayout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange = subresourceRange;
// Access masks are derived from the layouts by the helper in vks::util.
imageMemoryBarrier.srcAccessMask = vks::util::accessFlagsForLayout(oldImageLayout);
imageMemoryBarrier.dstAccessMask = vks::util::accessFlagsForLayout(newImageLayout);
// Put barrier on top
// Put barrier inside setup command buffer
// Conservative eAllCommands->eAllCommands stages: correct for setup work, not optimal.
cmdbuffer.pipelineBarrier(vk::PipelineStageFlagBits::eAllCommands, vk::PipelineStageFlagBits::eAllCommands,
vk::DependencyFlags(), nullptr, nullptr, imageMemoryBarrier);
}
// Fixed sub resource on first mip level and layer
void setImageLayout(vk::CommandBuffer cmdbuffer,
vk::Image image,
vk::ImageAspectFlags aspectMask,
vk::ImageLayout oldImageLayout,
vk::ImageLayout newImageLayout) const {
vk::ImageSubresourceRange subresourceRange;
subresourceRange.aspectMask = aspectMask;
subresourceRange.levelCount = 1;
subresourceRange.layerCount = 1;
setImageLayout(cmdbuffer, image, oldImageLayout, newImageLayout, subresourceRange);
}
// Convenience overload: records the transition into a throwaway primary
// command buffer and blocks until it completes (setup-time use only).
void setImageLayout(vk::Image image,
vk::ImageLayout oldImageLayout,
vk::ImageLayout newImageLayout,
vk::ImageSubresourceRange subresourceRange) const {
withPrimaryCommandBuffer([&](const auto& commandBuffer) {
setImageLayout(commandBuffer, image, oldImageLayout, newImageLayout, subresourceRange);
});
}
// Fixed sub resource on first mip level and layer
void setImageLayout(vk::Image image,
vk::ImageAspectFlags aspectMask,
vk::ImageLayout oldImageLayout,
vk::ImageLayout newImageLayout) const {
withPrimaryCommandBuffer([&](const auto& commandBuffer) {
setImageLayout(commandBuffer, image, aspectMask, oldImageLayout, newImageLayout);
});
}
// Pick a physical device, build the logical device, set up the memory
// allocator, and fetch the default graphics queue.  Call after createInstance().
void createDevice(const vk::SurfaceKHR& surface = nullptr) {
pickDevice(surface);
buildDevice();
#if VULKAN_USE_VMA
// The global VMA allocator needs both the physical and logical device.
vks::Allocation::initAllocator(physicalDevice, device);
#endif
// Get the graphics queue
queue = device.getQueue(queueIndices.graphics, 0);
}
protected:
// Select a physical device via devicePicker and cache its properties,
// features, memory info and the graphics/compute/transfer queue family indices.
void pickDevice(const vk::SurfaceKHR& surface ) {
// Physical device
physicalDevices = instance.enumeratePhysicalDevices();
// Note :
// This example will always use the first physical device reported,
// change the vector index if you have multiple Vulkan devices installed
// and want to use another one
physicalDevice = devicePicker(physicalDevices);
// Bitfield view of the packed VK_MAKE_VERSION encoding of apiVersion.
// NOTE(review): _version is filled via memcpy below but never read afterwards.
struct Version {
uint32_t patch : 12;
uint32_t minor : 10;
uint32_t major : 10;
} _version;
for (const auto& extensionProperties : physicalDevice.enumerateDeviceExtensionProperties()) {
physicalDeviceExtensions.insert(extensionProperties.extensionName);
qDebug() << "Device Extension " << extensionProperties.extensionName;
}
// Store properties (including limits) and features of the physical device
// So examples can check against them and see if a feature is actually supported
queueFamilyProperties = physicalDevice.getQueueFamilyProperties();
deviceProperties = physicalDevice.getProperties();
memcpy(&_version, &deviceProperties.apiVersion, sizeof(uint32_t));
deviceFeatures = physicalDevice.getFeatures();
// Gather physical device memory properties
deviceMemoryProperties = physicalDevice.getMemoryProperties();
// Resolve the preferred queue family for each capability class.
queueIndices.graphics = findQueue(vk::QueueFlagBits::eGraphics, surface);
queueIndices.compute = findQueue(vk::QueueFlagBits::eCompute);
queueIndices.transfer = findQueue(vk::QueueFlagBits::eTransfer);
}
// Create the logical device: one queue-create entry per distinct family
// (graphics always; compute/transfer only when they differ), plus the
// features and extensions chosen during pickDevice()/configuration.
void buildDevice() {
// Vulkan device
vks::queues::DeviceCreateInfo deviceCreateInfo;
deviceCreateInfo.addQueueFamily(queueIndices.graphics, queueFamilyProperties[queueIndices.graphics].queueCount);
if (queueIndices.compute != VK_QUEUE_FAMILY_IGNORED && queueIndices.compute != queueIndices.graphics) {
deviceCreateInfo.addQueueFamily(queueIndices.compute, queueFamilyProperties[queueIndices.compute].queueCount);
}
if (queueIndices.transfer != VK_QUEUE_FAMILY_IGNORED && queueIndices.transfer != queueIndices.graphics &&
queueIndices.transfer != queueIndices.compute) {
deviceCreateInfo.addQueueFamily(queueIndices.transfer, queueFamilyProperties[queueIndices.transfer].queueCount);
}
// Re-points the struct's internal pointers; must precede createDevice below.
deviceCreateInfo.update();
deviceCreateInfo.pEnabledFeatures = &deviceFeatures;
std::set<std::string> requestedDeviceExtensions = deviceExtensionsPicker(physicalDevice);
requestedDeviceExtensions.insert(requiredDeviceExtensions.begin(), requiredDeviceExtensions.end());
// enable the debug marker extension if it is present (likely meaning a debugging tool is present)
// requestedDeviceExtensions (std::set) owns the strings; this vector borrows pointers.
std::vector<const char*> enabledExtensions;
for (const auto& extension : requestedDeviceExtensions) {
enabledExtensions.push_back(extension.c_str());
}
if (enabledExtensions.size() > 0) {
deviceCreateInfo.enabledExtensionCount = (uint32_t)enabledExtensions.size();
deviceCreateInfo.ppEnabledExtensionNames = enabledExtensions.data();
}
device = physicalDevice.createDevice(deviceCreateInfo);
}
public:
// Vulkan instance, stores all per-application states
vk::Instance instance;
std::vector<vk::PhysicalDevice> physicalDevices;
// Physical device (GPU) that Vulkan will use
vk::PhysicalDevice physicalDevice;
// Names of all extensions supported by the chosen physical device (filled in pickDevice)
std::unordered_set<std::string> physicalDeviceExtensions;
// Queue family properties
std::vector<vk::QueueFamilyProperties> queueFamilyProperties;
// Stores physical device properties (for e.g. checking device limits)
vk::PhysicalDeviceProperties deviceProperties;
// Stores physical device features (for e.g. checking if a feature is available)
vk::PhysicalDeviceFeatures deviceFeatures;
// Features actually enabled on the logical device
vk::PhysicalDeviceFeatures enabledFeatures;
// Stores all available memory (type) properties for the physical device
vk::PhysicalDeviceMemoryProperties deviceMemoryProperties;
// Logical device, application's view of the physical device (GPU)
vks::Device device;
// Queue family indices resolved by pickDevice(); VK_QUEUE_FAMILY_IGNORED when absent.
struct QueueIndices {
uint32_t graphics{ VK_QUEUE_FAMILY_IGNORED };
uint32_t transfer{ VK_QUEUE_FAMILY_IGNORED };
uint32_t compute{ VK_QUEUE_FAMILY_IGNORED };
} queueIndices;
// Default graphics queue (queue 0 of the graphics family), set by createDevice().
vk::Queue queue;
// Lazily create and return the shared command pool for the graphics family.
// NOTE(review): the lazy init uses const_cast to write through a const method
// and has no synchronization -- not thread-safe; confirm single-threaded use.
const vk::CommandPool& getCommandPool() const {
if (!commandPool) {
vk::CommandPoolCreateInfo cmdPoolInfo;
cmdPoolInfo.queueFamilyIndex = queueIndices.graphics;
// Allow individual command buffers from this pool to be reset/re-recorded.
cmdPoolInfo.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
auto pool = device.createCommandPool(cmdPoolInfo);
const_cast<vk::CommandPool&>(commandPool) = pool;
}
return commandPool;
}
// Destroy the shared pool (and implicitly every command buffer allocated from it).
void destroyCommandPool() {
if (commandPool) {
device.destroy(commandPool);
commandPool = nullptr;
}
}
std::vector<vk::CommandBuffer> allocateCommandBuffers(
uint32_t count,
vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary) const {
std::vector<vk::CommandBuffer> result;
vk::CommandBufferAllocateInfo commandBufferAllocateInfo;
commandBufferAllocateInfo.commandPool = getCommandPool();
commandBufferAllocateInfo.commandBufferCount = count;
commandBufferAllocateInfo.level = vk::CommandBufferLevel::ePrimary;
result = device.allocateCommandBuffers(commandBufferAllocateInfo);
return result;
}
// Allocate a single command buffer of the given level from the shared pool.
vk::CommandBuffer createCommandBuffer(vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary) const {
vk::CommandBuffer cmdBuffer;
vk::CommandBufferAllocateInfo cmdBufAllocateInfo;
cmdBufAllocateInfo.commandPool = getCommandPool();
cmdBufAllocateInfo.level = level;
cmdBufAllocateInfo.commandBufferCount = 1;
cmdBuffer = device.allocateCommandBuffers(cmdBufAllocateInfo)[0];
return cmdBuffer;
}
// Submit an already-recorded command buffer and block until the device is idle.
// Heavyweight (full queue + device idle); intended for setup-time work only.
void flushCommandBuffer(vk::CommandBuffer& commandBuffer) const {
if (!commandBuffer) {
return;
}
queue.submit(vk::SubmitInfo{ 0, nullptr, nullptr, 1, &commandBuffer }, vk::Fence());
queue.waitIdle();
device.waitIdle();
}
// Create a short lived command buffer which is immediately executed and released
// This function is intended for initialization only. It incurs a queue and device
// flush and may impact performance if used in non-setup code
void withPrimaryCommandBuffer(const std::function<void(const vk::CommandBuffer& commandBuffer)>& f) const {
vk::CommandBuffer commandBuffer = createCommandBuffer(vk::CommandBufferLevel::ePrimary);
// One-time-submit: the buffer is recorded, executed once, then freed below.
commandBuffer.begin(vk::CommandBufferBeginInfo{ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
f(commandBuffer);
commandBuffer.end();
flushCommandBuffer(commandBuffer);
device.freeCommandBuffers(getCommandPool(), commandBuffer);
}
// Create an image and bind backing memory with the requested properties.
// With VMA the allocation is delegated to the global allocator; otherwise
// memory is allocated and bound manually (skipped for lazily-allocated memory).
Image createImage(const vk::ImageCreateInfo& imageCreateInfo, const vk::MemoryPropertyFlags& memoryPropertyFlags) const {
Image result;
result.device = device;
result.format = imageCreateInfo.format;
result.extent = imageCreateInfo.extent;
#if VULKAN_USE_VMA
VmaAllocationCreateInfo allocInfo = {};
allocInfo.requiredFlags = memoryPropertyFlags.operator unsigned int();
// vulkan.hpp wrappers are layout-compatible with the C handles, so VMA's C API
// can write directly into result.image / result.allocation.
auto pCreateInfo = &(imageCreateInfo.operator const VkImageCreateInfo&());
auto pImage = &reinterpret_cast<VkImage&>(result.image);
vmaCreateImage(Allocation::getAllocator(), pCreateInfo, &allocInfo, pImage, &result.allocation, nullptr);
#else
result.image = device.createImage(imageCreateInfo);
// Lazily-allocated memory is left unbound; the driver commits it on demand.
if ((memoryPropertyFlags & vk::MemoryPropertyFlagBits::eLazilyAllocated) !=
vk::MemoryPropertyFlagBits::eLazilyAllocated) {
vk::MemoryRequirements memReqs = device.getImageMemoryRequirements(result.image);
vk::MemoryAllocateInfo memAllocInfo;
memAllocInfo.allocationSize = result.allocSize = memReqs.size;
memAllocInfo.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags);
result.memory = device.allocateMemory(memAllocInfo);
device.bindImageMemory(result.image, result.memory, 0);
}
#endif
return result;
}
// Upload host data into a new device-local image via a staging buffer:
// copy data to staging, create the image, transition to transfer-dst, copy
// (per-mip when mipData is supplied), then transition to shader-read.
// mipData pairs are (extent, byte size) for each mip level, tightly packed in `data`.
Image stageToDeviceImage(vk::ImageCreateInfo imageCreateInfo,
const vk::MemoryPropertyFlags& memoryPropertyFlags,
vk::DeviceSize size,
const void* data,
const std::vector<MipData>& mipData = {}) const {
Buffer staging = createStagingBuffer(size, data);
// The image must accept transfer writes regardless of what the caller requested.
imageCreateInfo.usage = imageCreateInfo.usage | vk::ImageUsageFlagBits::eTransferDst;
Image result = createImage(imageCreateInfo, memoryPropertyFlags);
withPrimaryCommandBuffer([&](const vk::CommandBuffer& copyCmd) {
vk::ImageSubresourceRange range(vk::ImageAspectFlagBits::eColor, 0, imageCreateInfo.mipLevels, 0, 1);
// Prepare for transfer
setImageLayout(copyCmd, result.image, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, range);
// Prepare for transfer
std::vector<vk::BufferImageCopy> bufferCopyRegions;
{
vk::BufferImageCopy bufferCopyRegion;
bufferCopyRegion.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
bufferCopyRegion.imageSubresource.layerCount = 1;
if (!mipData.empty()) {
// One copy region per mip; bufferOffset advances by each mip's byte size.
for (uint32_t i = 0; i < imageCreateInfo.mipLevels; i++) {
bufferCopyRegion.imageSubresource.mipLevel = i;
bufferCopyRegion.imageExtent = mipData[i].first;
bufferCopyRegions.push_back(bufferCopyRegion);
bufferCopyRegion.bufferOffset += mipData[i].second;
}
} else {
// Single-mip upload of the full extent.
bufferCopyRegion.imageExtent = imageCreateInfo.extent;
bufferCopyRegions.push_back(bufferCopyRegion);
}
}
copyCmd.copyBufferToImage(staging.buffer, result.image, vk::ImageLayout::eTransferDstOptimal, bufferCopyRegions);
// Prepare for shader read
setImageLayout(copyCmd, result.image, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal,
range);
});
staging.destroy();
return result;
}
// Vector convenience overload: uploads data.size()*sizeof(T) bytes.
template <typename T>
Image stageToDeviceImage(const vk::ImageCreateInfo& imageCreateInfo,
const vk::MemoryPropertyFlags& memoryPropertyFlags,
const std::vector<T>& data) const {
return stageToDeviceImage(imageCreateInfo, memoryPropertyFlags, data.size() * sizeof(T), (void*)data.data());
}
// Same, defaulting the image memory to device-local.
template <typename T>
Image stageToDeviceImage(const vk::ImageCreateInfo& imageCreateInfo, const std::vector<T>& data) const {
return stageToDeviceImage(imageCreateInfo, vk::MemoryPropertyFlagBits::eDeviceLocal, data.size() * sizeof(T),
(void*)data.data());
}
// Create a buffer of `size` bytes with backing memory of the requested
// properties, and pre-fill its default descriptor to cover the whole buffer.
Buffer createBuffer(const vk::BufferUsageFlags& usageFlags,
                    vk::DeviceSize size,
                    const vk::MemoryPropertyFlags& memoryPropertyFlags) const {
    Buffer result;
    result.device = device;
    result.size = size;
    result.descriptor.range = size;
    result.descriptor.offset = 0;
    vk::BufferCreateInfo createInfo{ {}, size, usageFlags };
#if VULKAN_USE_VMA
    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.requiredFlags = memoryPropertyFlags.operator unsigned int();
    // vulkan.hpp wrappers are layout-compatible with the C handles, so VMA's
    // C API can write directly into result.buffer / result.allocation.
    auto pCreateInfo = &createInfo.operator const VkBufferCreateInfo&();
    auto pBuffer = &reinterpret_cast<VkBuffer&>(result.buffer);
    vmaCreateBuffer(Allocation::getAllocator(), pCreateInfo, &allocInfo, pBuffer, &result.allocation, nullptr);
#else
    // Fix: the original referenced a nonexistent `bufferCreateInfo` local here
    // (the create-info variable is named `createInfo`), which could not compile.
    result.buffer = device.createBuffer(createInfo);
    vk::MemoryRequirements memReqs = device.getBufferMemoryRequirements(result.buffer);
    vk::MemoryAllocateInfo memAlloc;
    result.allocSize = memAlloc.allocationSize = memReqs.size;
    memAlloc.memoryTypeIndex = getMemoryType(memReqs.memoryTypeBits, memoryPropertyFlags);
    result.memory = device.allocateMemory(memAlloc);
    device.bindBufferMemory(result.buffer, result.memory, 0);
#endif
    // Single assignment covers both branches (the old #else path set it twice).
    result.descriptor.buffer = result.buffer;
    return result;
}
// Device-local buffer (fast GPU access; fill it via stageToDeviceBuffer).
Buffer createDeviceBuffer(const vk::BufferUsageFlags& usageFlags, vk::DeviceSize size) const {
static const vk::MemoryPropertyFlags memoryProperties = vk::MemoryPropertyFlagBits::eDeviceLocal;
return createBuffer(usageFlags, size, memoryProperties);
}
// Host-visible, host-coherent transfer-source buffer, optionally pre-filled
// with `data` (mapped, copied, then unmapped).
Buffer createStagingBuffer(vk::DeviceSize size, const void* data = nullptr) const {
auto result = createBuffer(vk::BufferUsageFlagBits::eTransferSrc, size,
vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
if (data != nullptr) {
result.map();
result.copy(size, data);
result.unmap();
}
return result;
}
template <typename T>
Buffer createStagingBuffer(const std::vector<T>& data) const {
return createBuffer(data.size() * sizeof(T), (void*)data.data());
}
// Stage a single value into a host-visible transfer-source buffer.
template <typename T>
Buffer createStagingBuffer(const T& data) const {
    return createStagingBuffer(sizeof(T), &data);
}
// Allocate a host-visible uniform buffer sized for `count` copies of T, each
// placed at the device's minUniformBufferOffsetAlignment, and upload `data`
// into the first slot. The descriptor range covers one aligned element.
template <typename T>
Buffer createUniformBuffer(const T& data, size_t count = 3) const {
    auto alignment = deviceProperties.limits.minUniformBufferOffsetAlignment;
    auto extra = sizeof(T) % alignment;
    // FIX: when sizeof(T) is already a multiple of the alignment the
    // original still added a full alignment block per element.
    auto alignedSize = (extra == 0) ? sizeof(T) : sizeof(T) + (alignment - extra);
    auto allocatedSize = count * alignedSize;
    static const auto usageFlags = vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eTransferDst;
    static const auto memoryFlags = vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent;
    auto result = createBuffer(usageFlags, allocatedSize, memoryFlags);
    result.alignment = alignedSize;
    result.descriptor.range = result.alignment;
    // Left mapped intentionally: host-coherent memory allows persistent mapping.
    result.map();
    result.copy(data);
    return result;
}
// Upload `size` bytes to a new device-local buffer by staging through a
// temporary host-visible buffer and a one-shot copy command.
Buffer stageToDeviceBuffer(const vk::BufferUsageFlags& usage, size_t size, const void* data) const {
    auto staging = createStagingBuffer(size, data);
    auto deviceBuffer = createDeviceBuffer(usage | vk::BufferUsageFlagBits::eTransferDst, size);
    withPrimaryCommandBuffer([&](vk::CommandBuffer copyCmd) {
        const vk::BufferCopy region{ 0, 0, size };
        copyCmd.copyBuffer(staging.buffer, deviceBuffer.buffer, region);
    });
    staging.destroy();
    return deviceBuffer;
}
// Upload a vector's contents to a device-local buffer.
template <typename T>
Buffer stageToDeviceBuffer(const vk::BufferUsageFlags& usage, const std::vector<T>& data) const {
    return stageToDeviceBuffer(usage, data.size() * sizeof(T), data.data());
}
// Upload a single value to a device-local buffer.
template <typename T>
Buffer stageToDeviceBuffer(const vk::BufferUsageFlags& usage, const T& data) const {
    return stageToDeviceBuffer(usage, sizeof(T), (void*)&data);
}
// Find a memory type that is permitted by `typeBits` (one bit per memory
// type index) and carries all of `properties`. Writes the index and returns
// true on success, false when no type matches.
vk::Bool32 getMemoryType(uint32_t typeBits, const vk::MemoryPropertyFlags& properties, uint32_t* typeIndex) const {
    // FIX: only memoryTypeCount entries of memoryTypes are valid; the
    // original scanned all 32 slots, inspecting entries the implementation
    // never filled in.
    for (uint32_t i = 0; i < deviceMemoryProperties.memoryTypeCount; i++) {
        if ((typeBits & (1 << i)) &&
            (deviceMemoryProperties.memoryTypes[i].propertyFlags & properties) == properties) {
            *typeIndex = i;
            return true;
        }
    }
    return false;
}
// As above but returns the index directly; throws when no matching memory
// type exists instead of silently returning index 0 (which would bind the
// allocation to an arbitrary, possibly incompatible heap).
uint32_t getMemoryType(uint32_t typeBits, const vk::MemoryPropertyFlags& properties) const {
    uint32_t result = 0;
    if (!getMemoryType(typeBits, properties, &result)) {
        throw std::runtime_error("Unable to find a memory type matching the requested properties");
    }
    return result;
}
// Pick the first depth(/stencil) format, in decreasing order of precision,
// that this device supports as an optimal-tiling depth-stencil attachment.
vk::Format getSupportedDepthFormat() const {
    static const std::vector<vk::Format> CANDIDATES{
        vk::Format::eD32SfloatS8Uint, vk::Format::eD32Sfloat, vk::Format::eD24UnormS8Uint,
        vk::Format::eD16UnormS8Uint,  vk::Format::eD16Unorm,
    };
    for (const auto& candidate : CANDIDATES) {
        const auto props = physicalDevice.getFormatProperties(candidate);
        if (props.optimalTilingFeatures & vk::FormatFeatureFlagBits::eDepthStencilAttachment) {
            return candidate;
        }
    }
    throw std::runtime_error("No supported depth format");
}
private:
// A collection of items queued for destruction. Once a fence has been created
// for a queued submit, these items can be moved to the recycler for actual destruction
// by pairing them with that fence in `recycler`.
mutable VoidLambdaList dumpster;
// (fence, cleanup-lambdas) pairs; executed once the fence signals.
FencedLambdaQueue recycler;
// Callbacks that may add instance extensions before instance creation.
InstanceExtensionsPickerFunctions instanceExtensionsPickers;
// Set to true when example is created with enabled validation layers
bool enableValidation = false;
// Set to true when the debug marker extension is detected
bool enableDebugMarkers = false;
std::set<std::string> requiredExtensions;
std::set<std::string> requiredDeviceExtensions;
// Default physical-device selection: first enumerated device.
DevicePicker devicePicker = [](const std::vector<vk::PhysicalDevice>& devices) -> vk::PhysicalDevice {
return devices[0];
};
// Default device-extension selection: no extra extensions.
DeviceExtensionsPickerFunction deviceExtensionsPicker = [](const vk::PhysicalDevice& device) -> std::set<std::string> {
return {};
};
vk::CommandPool commandPool;
};
using ContextPtr = std::shared_ptr<Context>;
} // namespace vks

View file

@ -0,0 +1,173 @@
/*
* Vulkan examples debug wrapper
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
*/
#include "Debug.h"
#include <functional>
#include <iostream>
#include <list>
#include <string>
#include <sstream>
#include <mutex>
#include <QtCore/QDebug>
namespace vks { namespace debug {
// Validation layers enabled when debugging is requested. Android ships the
// individual Google/LunarG layers; desktop builds use the
// standard_validation meta-layer.
const StringList& getDefaultValidationLayers() {
    static const StringList VALIDATION_LAYERS{
#if defined(__ANDROID__)
        "VK_LAYER_GOOGLE_threading",
        "VK_LAYER_LUNARG_parameter_validation",
        "VK_LAYER_LUNARG_object_tracker",
        "VK_LAYER_LUNARG_core_validation",
        "VK_LAYER_LUNARG_swapchain",
        "VK_LAYER_GOOGLE_unique_objects",
#else
        "VK_LAYER_LUNARG_standard_validation",
#endif
    };
    return VALIDATION_LAYERS;
}
// Default sink for validation messages: prefix with the severity and write
// to stdout (and the debugger output window on Windows).
const Output DEFAULT_OUTPUT = [](const SevFlags& sevFlags, const std::string& message) {
#ifdef _MSC_VER
    OutputDebugStringA(message.c_str());
    OutputDebugStringA("\n");
#endif
    // FIX: removed an unused std::stringstream local the original declared
    // but never read or wrote.
    if (sevFlags & SevBits::eError) {
        std::cout << "ERROR: ";
    } else if (sevFlags & SevBits::eWarning) {
        std::cout << "WARNING: ";
    } else if (sevFlags & SevBits::eInfo) {
        std::cout << "INFO: ";
    } else if (sevFlags & SevBits::eVerbose) {
        std::cout << "VERBOSE: ";
    } else {
        std::cout << "Unknown sev: ";
    }
    std::cout << message << std::endl;
};
// Default formatter: forward the raw validation message text unchanged.
const MessageFormatter DEFAULT_MESSAGE_FORMATTER =
[](const SevFlags& sevFlags, const TypeFlags& typeFlags, const CallbackData* callbackData, void*) -> std::string {
// FIXME improve on this
return std::string(callbackData->pMessage);
};
// Currently-active hooks; swapped out via setMessageFormatter() /
// setOutputFunction() below.
MessageFormatter CURRENT_FORMATTER = DEFAULT_MESSAGE_FORMATTER;
Output CURRENT_OUTPUT = DEFAULT_OUTPUT;
// Install a new message output sink and return the previously-installed one
// so the caller can restore it later.
Output setOutputFunction(const Output& function) {
    Output previous = CURRENT_OUTPUT;
    CURRENT_OUTPUT = function;
    return previous;
}
// Replace the formatter used to turn raw callback data into display text.
void setMessageFormatter(const MessageFormatter& function) {
    CURRENT_FORMATTER = function;
}
// Raw C callback registered with VK_EXT_debug_utils. Adapts the C types to
// the vulkan.hpp wrapper types, then dispatches through the configured
// formatter and output sink.
VkBool32 debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
                       VkDebugUtilsMessageTypeFlagsEXT messageType,
                       const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
                       void* pUserData) {
    SevFlags sevFlags;
    // SevFlags has the same representation as the C flag bits; this in-place
    // reinterpretation converts without an intermediate cast chain.
    reinterpret_cast<VkDebugUtilsMessageSeverityFlagBitsEXT&>(sevFlags) = messageSeverity;
    TypeFlags typeFlags{ messageType };
    auto callbackData = reinterpret_cast<const CallbackData*>(pCallbackData);
    auto message = CURRENT_FORMATTER(sevFlags, typeFlags, callbackData, pUserData);
    CURRENT_OUTPUT(sevFlags, message);
    // FIX: the spec requires applications to return VK_FALSE here; returning
    // VK_TRUE makes the triggering Vulkan call fail with
    // VK_ERROR_VALIDATION_FAILED_EXT.
    return VK_FALSE;
}
// Messenger handle created by setupDebugging(), destroyed by cleanupDebugging().
static vk::DebugUtilsMessengerEXT messenger{};
// Lazily-initialized dynamic dispatcher for extension entry points. The
// first call must supply a valid instance; subsequent calls may omit it.
// NOTE(review): if this is ever called before an instance is provided, the
// returned dispatcher still has null function pointers — confirm callers
// always run setup first.
const vk::DispatchLoaderDynamic& getInstanceDispatcher(const vk::Instance& instance = nullptr) {
static vk::DispatchLoaderDynamic dispatcher;
static std::once_flag once;
if (instance) {
std::call_once(once, [&] { dispatcher.init(instance); });
}
return dispatcher;
}
// Create a debug-utils messenger on the instance that reports the requested
// severities and types through debugCallback.
void setupDebugging(const vk::Instance& instance, const SevFlags& severityFlags, const TypeFlags& typeFlags, void* userData) {
    const vk::DebugUtilsMessengerCreateInfoEXT createInfo{ {}, severityFlags, typeFlags, debugCallback, userData };
    const auto& dispatcher = getInstanceDispatcher(instance);
    messenger = instance.createDebugUtilsMessengerEXT(createInfo, nullptr, dispatcher);
}
// Destroy the messenger created by setupDebugging().
void cleanupDebugging(const vk::Instance& instance) {
    const auto& dispatcher = getInstanceDispatcher(instance);
    instance.destroyDebugUtilsMessengerEXT(messenger, nullptr, dispatcher);
}
namespace marker {
// True once setup() has confirmed the debug-utils entry points exist; every
// marker helper below is a no-op while this is false.
static bool active = false;
// Probe the dynamic dispatcher for the debug-utils entry points.
void setup(const vk::Instance& instance) {
    // FIX: removed a leftover "QQQ" qDebug() trace from development.
    const auto& dispatcher = getInstanceDispatcher(instance);
    active = (nullptr != dispatcher.vkSetDebugUtilsObjectTagEXT);
}
// Attach a human-readable name to a Vulkan handle (visible in debugging
// tools). No-op when the debug-utils entry points were not found.
void setObjectName(const vk::Device& device, uint64_t object, vk::ObjectType objectType, const std::string& name) {
    if (active) {
        // FIX: the original fetched the dispatcher into a local it never
        // used and then called getInstanceDispatcher() again; fetch once.
        const auto& dispatcher = getInstanceDispatcher();
        device.setDebugUtilsObjectNameEXT({ objectType, object, name.c_str() }, dispatcher);
    }
}
// Open a named, colored debug label region in the command buffer.
// Silently does nothing if the debug-utils functions are unavailable.
void beginRegion(const vk::CommandBuffer& cmdbuffer, const std::string& name, const glm::vec4& color) {
    if (!active) {
        return;
    }
    const vk::DebugUtilsLabelEXT label{ name.c_str(), { { color.r, color.g, color.b, color.a } } };
    cmdbuffer.beginDebugUtilsLabelEXT(label, getInstanceDispatcher());
}
// Insert a single named, colored debug label into the command buffer.
// Silently does nothing if the debug-utils functions are unavailable.
void insert(const vk::CommandBuffer& cmdbuffer, const std::string& name, const glm::vec4& color) {
    if (!active) {
        return;
    }
    const vk::DebugUtilsLabelEXT label{ name.c_str(), { { color.r, color.g, color.b, color.a } } };
    cmdbuffer.insertDebugUtilsLabelEXT(label, getInstanceDispatcher());
}
// Close the most recently opened debug label region.
// Silently does nothing if the debug-utils functions are unavailable.
void endRegion(const vk::CommandBuffer& cmdbuffer) {
    if (!active) {
        return;
    }
    cmdbuffer.endDebugUtilsLabelEXT(getInstanceDispatcher());
}
// Type-specific naming helpers: each converts the vulkan.hpp wrapper to its
// raw handle and forwards to setObjectName with the matching vk::ObjectType.
void setName(const vk::Device& device, const vk::CommandBuffer& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkCommandBuffer(), vk::ObjectType::eCommandBuffer, name);
}
void setName(const vk::Device& device, const vk::Queue& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkQueue(), vk::ObjectType::eQueue, name);
}
void setName(const vk::Device& device, const vk::Image& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkImage(), vk::ObjectType::eImage, name);
}
void setName(const vk::Device& device, const vk::Buffer& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkBuffer(), vk::ObjectType::eBuffer, name);
}
void setName(const vk::Device& device, const vk::Framebuffer& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkFramebuffer(), vk::ObjectType::eFramebuffer, name);
}
void setName(const vk::Device& device, const vk::Pipeline& obj, const std::string& name) {
setObjectName(device, (uint64_t)obj.operator VkPipeline(), vk::ObjectType::ePipeline, name);
}
} // namespace marker
}} // namespace vks::debug

View file

@ -0,0 +1,65 @@
#pragma once
#include "Config.h"
namespace vks { namespace debug {
using StringList = std::list<std::string>;
const StringList& getDefaultValidationLayers();
using SevBits = vk::DebugUtilsMessageSeverityFlagBitsEXT;
using TypeBits = vk::DebugUtilsMessageTypeFlagBitsEXT;
using SevFlags = vk::DebugUtilsMessageSeverityFlagsEXT;
using TypeFlags = vk::DebugUtilsMessageTypeFlagsEXT;
using CallbackData = vk::DebugUtilsMessengerCallbackDataEXT;
using Output = std::function<void(const SevFlags&, const std::string&)>;
Output setOutputFunction(const Output& function);
using MessageFormatter = std::function<std::string(const SevFlags&, const TypeFlags&, const CallbackData*, void*)>;
void setMessageFormatter(const MessageFormatter& function);
void setupDebugging(const vk::Instance& instance,
const SevFlags& severityFlags = SevBits::eError | SevBits::eWarning,
const TypeFlags& typeFlags = TypeBits::eGeneral | TypeBits::eValidation | TypeBits::ePerformance,
void* userData = nullptr);
// Clear debug callback
void cleanupDebugging(const vk::Instance& instance);
// Setup and functions for the VK_EXT_debug_marker_extension
// Extension spec can be found at https://github.com/KhronosGroup/Vulkan-Docs/blob/1.0-VK_EXT_debug_marker/doc/specs/vulkan/appendices/VK_EXT_debug_marker.txt
// Note that the extension will only be present if run from an offline debugging application
// The actual check for extension presence and enabling it on the device is done in the example base class
// See ExampleBase::createInstance and ExampleBase::createDevice (base/vkx::ExampleBase.cpp)
namespace marker {
// Get function pointers for the debug report extensions from the device
void setup(const vk::Instance& instance);
// Start a new debug marker region
void beginRegion(const vk::CommandBuffer& cmdbuffer, const std::string& pMarkerName, const glm::vec4& color);
// Insert a new debug marker into the command buffer
void insert(const vk::CommandBuffer& cmdbuffer, const std::string& markerName, const glm::vec4& color);
// End the current debug marker region
void endRegion(const vk::CommandBuffer& cmdbuffer);
// Sets the debug name of an object
// All Objects in Vulkan are represented by their 64-bit handles which are passed into this function
// along with the object type
void setObjectName(const vk::Device& device, uint64_t object, vk::ObjectType objectType, const std::string& name);
// Object specific naming functions
void setName(const vk::Device& device, const vk::CommandBuffer& obj, const std::string& name);
void setName(const vk::Device& device, const vk::Queue& obj, const std::string& name);
void setName(const vk::Device& device, const vk::Image& obj, const std::string& name);
void setName(const vk::Device& device, const vk::Buffer& obj, const std::string& name);
void setName(const vk::Device& device, const vk::Framebuffer& obj, const std::string& name);
void setName(const vk::Device& device, const vk::Pipeline& obj, const std::string& name);
} // namespace marker
}} // namespace vks::debug

View file

@ -0,0 +1,171 @@
#pragma once
#include "Config.h"
namespace vks {
// vks::Device — thin wrapper over vk::Device that adds a uniform destroy<T>()
// member template so generic cleanup code can release any Vulkan handle type
// and reset it to a null handle afterwards.
// NOTE(review): explicit specialization of a member template inside the class
// body (`template <>` at class scope) is a compiler extension (MSVC accepts
// it); standard C++ requires these at namespace scope or as plain overloads —
// confirm this compiles on all target toolchains.
struct Device : public vk::Device {
using OptionalAllocationCallbacks = vk::Optional<const vk::AllocationCallbacks>;
using vk::Device::destroy;
// Allow assigning a plain vk::Device handle into this wrapper.
Device& operator=(const vk::Device& device) {
(vk::Device&)(*this) = device;
return *this;
}
// Primary template: reaching this means no specialization exists for T,
// which is a programming error.
// NOTE(review): the version guard looks intended to use static_assert
// where the standard permits a dependent-false assert — confirm intent.
template <typename T>
void destroy(T& t, OptionalAllocationCallbacks allocator = nullptr) const {
#if __cplusplus > 201703L
static_assert(false);
#else
assert(false);
#endif
}
// Each specialization below destroys the handle and nulls the caller's copy.
template <>
void destroy<vk::Fence>(vk::Fence& object, OptionalAllocationCallbacks allocator) const {
destroyFence(object, allocator);
object = vk::Fence();
}
template <>
void destroy<vk::Semaphore>(vk::Semaphore& object, OptionalAllocationCallbacks allocator) const {
destroySemaphore(object, allocator);
object = vk::Semaphore();
}
template <>
void destroy<vk::Event>(vk::Event& object, OptionalAllocationCallbacks allocator) const {
destroyEvent(object, allocator);
object = vk::Event();
}
template <>
void destroy<vk::QueryPool>(vk::QueryPool& object, OptionalAllocationCallbacks allocator) const {
destroyQueryPool(object, allocator);
object = vk::QueryPool();
}
template <>
void destroy<vk::Buffer>(vk::Buffer& object, OptionalAllocationCallbacks allocator) const {
destroyBuffer(object, allocator);
object = vk::Buffer();
}
template <>
void destroy<vk::BufferView>(vk::BufferView& object, OptionalAllocationCallbacks allocator) const {
destroyBufferView(object, allocator);
object = vk::BufferView();
}
template <>
void destroy<vk::Image>(vk::Image& object, OptionalAllocationCallbacks allocator) const {
destroyImage(object, allocator);
object = vk::Image();
}
template <>
void destroy<vk::ImageView>(vk::ImageView& object, OptionalAllocationCallbacks allocator) const {
destroyImageView(object, allocator);
object = vk::ImageView();
}
template <>
void destroy<vk::ShaderModule>(vk::ShaderModule& object, OptionalAllocationCallbacks allocator) const {
destroyShaderModule(object, allocator);
object = vk::ShaderModule();
}
template <>
void destroy<vk::PipelineCache>(vk::PipelineCache& object, OptionalAllocationCallbacks allocator) const {
destroyPipelineCache(object, allocator);
object = vk::PipelineCache();
}
template <>
void destroy<vk::Pipeline>(vk::Pipeline& object, OptionalAllocationCallbacks allocator) const {
destroyPipeline(object, allocator);
object = vk::Pipeline();
}
template <>
void destroy<vk::PipelineLayout>(vk::PipelineLayout& object, OptionalAllocationCallbacks allocator) const {
destroyPipelineLayout(object, allocator);
object = vk::PipelineLayout();
}
template <>
void destroy<vk::Sampler>(vk::Sampler& object, OptionalAllocationCallbacks allocator) const {
destroySampler(object, allocator);
object = vk::Sampler();
}
template <>
void destroy<vk::DescriptorSetLayout>(vk::DescriptorSetLayout& object, OptionalAllocationCallbacks allocator) const {
destroyDescriptorSetLayout(object, allocator);
object = vk::DescriptorSetLayout();
}
template <>
void destroy<vk::DescriptorPool>(vk::DescriptorPool& object, OptionalAllocationCallbacks allocator) const {
destroyDescriptorPool(object, allocator);
object = vk::DescriptorPool();
}
template <>
void destroy<vk::Framebuffer>(vk::Framebuffer& object, OptionalAllocationCallbacks allocator) const {
destroyFramebuffer(object, allocator);
object = vk::Framebuffer();
}
template <>
void destroy<vk::RenderPass>(vk::RenderPass& object, OptionalAllocationCallbacks allocator) const {
destroyRenderPass(object, allocator);
object = vk::RenderPass();
}
template <>
void destroy<vk::CommandPool>(vk::CommandPool& object, OptionalAllocationCallbacks allocator) const {
destroyCommandPool(object, allocator);
object = vk::CommandPool();
}
template <>
void destroy<vk::SwapchainKHR>(vk::SwapchainKHR& object, OptionalAllocationCallbacks allocator) const {
destroySwapchainKHR(object, allocator);
object = vk::SwapchainKHR();
}
template <>
void destroy<vk::IndirectCommandsLayoutNVX>(vk::IndirectCommandsLayoutNVX& object, OptionalAllocationCallbacks allocator) const {
destroyIndirectCommandsLayoutNVX(object, allocator);
object = vk::IndirectCommandsLayoutNVX();
}
template <>
void destroy<vk::ObjectTableNVX>(vk::ObjectTableNVX& object, OptionalAllocationCallbacks allocator) const {
destroyObjectTableNVX(object, allocator);
object = vk::ObjectTableNVX();
}
template <>
void destroy<vk::DescriptorUpdateTemplateKHR>(vk::DescriptorUpdateTemplateKHR& object, OptionalAllocationCallbacks allocator) const {
destroyDescriptorUpdateTemplateKHR(object, allocator);
object = vk::DescriptorUpdateTemplateKHR();
}
template <>
void destroy<vk::SamplerYcbcrConversionKHR>(vk::SamplerYcbcrConversionKHR& object, OptionalAllocationCallbacks allocator) const {
destroySamplerYcbcrConversionKHR(object, allocator);
object = vk::SamplerYcbcrConversionKHR();
}
template <>
void destroy<vk::ValidationCacheEXT>(vk::ValidationCacheEXT& object, OptionalAllocationCallbacks allocator) const {
destroyValidationCacheEXT(object, allocator);
object = vk::ValidationCacheEXT();
}
};
}

View file

@ -0,0 +1,7 @@
namespace vks {
struct Allocated;
struct Buffer;
struct Image;
struct Context;
struct Swapchain;
}

View file

@ -0,0 +1,109 @@
//
// Created by Bradley Austin Davis on 2016/03/19
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include "Config.h"
#include "Context.h"
namespace vks {
using FramebufferAttachment = Image;
// Offscreen framebuffer: owns its color attachments, optional depth
// attachment, and the vk::Framebuffer handle. destroy() releases all of them.
struct Framebuffer {
using Attachment = FramebufferAttachment;
vk::Device device;
vk::Framebuffer framebuffer;
Attachment depth;
std::vector<Attachment> colors;
// Release attachments first, then the framebuffer handle itself.
void destroy() {
for (auto& color : colors) {
color.destroy();
}
// depth.format stays eUndefined when create() was called without a
// depth format, so this skips destroying a never-created attachment.
if (depth.format != vk::Format::eUndefined) {
depth.destroy();
}
if (framebuffer) {
device.destroyFramebuffer(framebuffer);
framebuffer = vk::Framebuffer();
}
}
// Prepare a new framebuffer for offscreen rendering
// The contents of this framebuffer are then
// blitted to our render target
// Creates one color attachment per entry in colorFormats and, when
// depthFormat != eUndefined, a depth-stencil attachment; all are
// device-local, optimally tiled, single-mip, single-layer 2D images.
// Note: destroy() is called first, so this is safe to call repeatedly.
void create(const vks::Context& context, const glm::uvec2& size, const std::vector<vk::Format>& colorFormats, vk::Format depthFormat, const vk::RenderPass& renderPass, vk::ImageUsageFlags colorUsage = vk::ImageUsageFlagBits::eSampled, vk::ImageUsageFlags depthUsage = vk::ImageUsageFlags()) {
device = context.device;
destroy();
colors.resize(colorFormats.size());
// Color attachment
vk::ImageCreateInfo image;
image.imageType = vk::ImageType::e2D;
image.extent.width = size.x;
image.extent.height = size.y;
image.extent.depth = 1;
image.mipLevels = 1;
image.arrayLayers = 1;
image.samples = vk::SampleCountFlagBits::e1;
image.tiling = vk::ImageTiling::eOptimal;
// vk::Image of the framebuffer is blit source
image.usage = vk::ImageUsageFlagBits::eColorAttachment | colorUsage;
vk::ImageViewCreateInfo colorImageView;
colorImageView.viewType = vk::ImageViewType::e2D;
colorImageView.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
colorImageView.subresourceRange.levelCount = 1;
colorImageView.subresourceRange.layerCount = 1;
for (size_t i = 0; i < colorFormats.size(); ++i) {
image.format = colorFormats[i];
colors[i] = context.createImage(image, vk::MemoryPropertyFlagBits::eDeviceLocal);
colorImageView.format = colorFormats[i];
colorImageView.image = colors[i].image;
colors[i].view = device.createImageView(colorImageView);
}
bool useDepth = depthFormat != vk::Format::eUndefined;
// Depth stencil attachment
// NOTE(review): the view aspect is eDepth only; stencil-capable formats
// may also need eStencil — confirm against usage.
if (useDepth) {
image.format = depthFormat;
image.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment | depthUsage;
depth = context.createImage(image, vk::MemoryPropertyFlagBits::eDeviceLocal);
vk::ImageViewCreateInfo depthStencilView;
depthStencilView.viewType = vk::ImageViewType::e2D;
depthStencilView.format = depthFormat;
depthStencilView.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eDepth;
depthStencilView.subresourceRange.levelCount = 1;
depthStencilView.subresourceRange.layerCount = 1;
depthStencilView.image = depth.image;
depth.view = device.createImageView(depthStencilView);
}
// Attachment order must match the render pass: colors first, then depth.
std::vector<vk::ImageView> attachments;
attachments.resize(colors.size());
for (size_t i = 0; i < colors.size(); ++i) {
attachments[i] = colors[i].view;
}
if (useDepth) {
attachments.push_back(depth.view);
}
vk::FramebufferCreateInfo fbufCreateInfo;
fbufCreateInfo.renderPass = renderPass;
fbufCreateInfo.attachmentCount = (uint32_t)attachments.size();
fbufCreateInfo.pAttachments = attachments.data();
fbufCreateInfo.width = size.x;
fbufCreateInfo.height = size.y;
fbufCreateInfo.layers = 1;
framebuffer = context.device.createFramebuffer(fbufCreateInfo);
}
};
}

View file

@ -0,0 +1,86 @@
#include "Helpers.h"
#include <mutex>
#include <QtCore/QString>
#include <QtCore/QFileInfo>
#include <gl/Config.h>
#include <shared/FileUtils.h>
// Returns the absolute path of the on-disk pipeline cache file.
const QString& getPipelineCacheFile() {
    static const QString PIPELINE_CACHE_FOLDER{ "" };
    static const QString PIPELINE_CACHE_FILE_NAME{ "pipeline_cache.bin" };
    static const QString PIPELINE_CACHE_FILE = FileUtils::standardPath(PIPELINE_CACHE_FOLDER) + PIPELINE_CACHE_FILE_NAME;
    // FIX: previously returned PIPELINE_CACHE_FOLDER, so load/save operated
    // on the folder path instead of the cache file.
    return PIPELINE_CACHE_FILE;
}
bool vks::util::loadPipelineCacheData(std::vector<uint8_t>& outCache) {
outCache.clear();
const QString& cacheFile = getPipelineCacheFile();
if (QFileInfo(cacheFile).exists()) {
QFile file(cacheFile);
if (file.open(QFile::ReadOnly)) {
QByteArray data = file.readAll();
outCache.resize(data.size());
memcpy(outCache.data(), data.data(), data.size());
file.close();
return true;
}
}
return false;
}
// Persist the pipeline cache blob to disk. Best-effort: silently does
// nothing when the file cannot be opened.
void vks::util::savePipelineCacheData(const std::vector<uint8_t>& cache) {
    QString cacheFile = getPipelineCacheFile();
    QFile saveFile(cacheFile);
    // FIX: the original ignored the open() result and wrote to an unopened
    // file when opening failed.
    if (!saveFile.open(QFile::WriteOnly | QFile::Truncate)) {
        return;
    }
    saveFile.write((const char*)cache.data(), cache.size());
    saveFile.close();
}
// Enumerate the extension strings of the current GL context, computed once.
// FIX: now returns by const reference — the original returned the static
// set by value, copying the whole container on every query.
static const std::set<std::string>& getGLExtensions() {
    static std::set<std::string> result;
    static std::once_flag once;
    std::call_once(once, [&]{
        GLint count = 0;
        glGetIntegerv(GL_NUM_EXTENSIONS, &count);
        for (GLint i = 0; i < count; ++i) {
            auto name = glGetStringi(GL_EXTENSIONS, i);
            result.insert((const char*)name);
        }
    });
    return result;
}
static bool hasExtension(const std::string& name) {
const auto& extensions = getGLExtensions();
return 0 != extensions.count(name);
}
// Collect driver/device UUIDs from the GL context (computed once) for
// matching against Vulkan physical devices.
// NOTE(review): the loop inserts the same GL_DRIVER_UUID_EXT value once per
// device — it never queries a per-device UUID (GL_DEVICE_UUID_EXT), so the
// set can only ever hold one element. Confirm intent.
// NOTE(review): the UUID is read into GLint[16] (64 bytes) but consumed as a
// 16-byte QByteArray of its raw storage — verify the expected layout.
vks::util::gl::UuidSet vks::util::gl::getUuids() {
static vks::util::gl::UuidSet result;
static std::once_flag once;
QUuid driverUuid;
using GLUUID = std::array<GLint, 16>;
std::call_once(once, [&]{
GLUUID value;
glGetIntegerv(GL_DRIVER_UUID_EXT, value.data());
GLint deviceIdCount = 0;
glGetIntegerv(GL_NUM_DEVICE_UUIDS_EXT, &deviceIdCount);
for (GLint i = 0; i < deviceIdCount; ++i) {
result.insert(QUuid(QByteArray((const char*)value.data(), (int)value.size())));
}
});
return result;
}
// GL/Vulkan interop needs both the external-memory-object and external-
// semaphore GL extensions.
bool vks::util::gl::contextSupported(QOpenGLContext*) {
    const bool hasMemoryObject = hasExtension("GL_EXT_memory_object");
    const bool hasSemaphore = hasExtension("GL_EXT_semaphore");
    return hasMemoryObject && hasSemaphore;
}

View file

@ -0,0 +1,91 @@
#pragma once
#include "Config.h"
#include <array>
#include <vector>
#include <QtCore/QUuid>
class QOpenGLContext;
namespace vks { namespace util {
// Color write mask enabling all four channels (RGBA).
inline vk::ColorComponentFlags fullColorWriteMask() {
    using CC = vk::ColorComponentFlagBits;
    return CC::eR | CC::eG | CC::eB | CC::eA;
}
// Build a vk::Viewport anchored at the origin with the given extent and
// depth range.
inline vk::Viewport viewport(float width, float height, float minDepth = 0, float maxDepth = 1) {
    vk::Viewport result;
    result.width = width;
    result.height = height;
    result.minDepth = minDepth;
    result.maxDepth = maxDepth;
    return result;
}
// glm-sized overload.
inline vk::Viewport viewport(const glm::uvec2& size, float minDepth = 0, float maxDepth = 1) {
    return viewport(static_cast<float>(size.x), static_cast<float>(size.y), minDepth, maxDepth);
}
// Vulkan-extent overload.
inline vk::Viewport viewport(const vk::Extent2D& size, float minDepth = 0, float maxDepth = 1) {
    return viewport(static_cast<float>(size.width), static_cast<float>(size.height), minDepth, maxDepth);
}
// Build a vk::Rect2D from an extent plus an optional offset.
inline vk::Rect2D rect2D(uint32_t width, uint32_t height, int32_t offsetX = 0, int32_t offsetY = 0) {
    vk::Rect2D result;
    result.extent.width = width;
    result.extent.height = height;
    result.offset.x = offsetX;
    result.offset.y = offsetY;
    return result;
}
// glm-sized overload.
inline vk::Rect2D rect2D(const glm::uvec2& size, const glm::ivec2& offset = glm::ivec2(0)) {
    return rect2D(size.x, size.y, offset.x, offset.y);
}
// Vulkan-extent overload.
inline vk::Rect2D rect2D(const vk::Extent2D& size, const vk::Offset2D& offset = vk::Offset2D()) {
    return rect2D(size.width, size.height, offset.x, offset.y);
}
// Maps an image layout to the access mask typically used with it in a
// pipeline barrier; unknown layouts map to an empty mask.
inline vk::AccessFlags accessFlagsForLayout(vk::ImageLayout layout) {
switch (layout) {
case vk::ImageLayout::ePreinitialized:
return vk::AccessFlagBits::eHostWrite;
case vk::ImageLayout::eTransferDstOptimal:
return vk::AccessFlagBits::eTransferWrite;
case vk::ImageLayout::eTransferSrcOptimal:
return vk::AccessFlagBits::eTransferRead;
case vk::ImageLayout::eColorAttachmentOptimal:
return vk::AccessFlagBits::eColorAttachmentWrite;
case vk::ImageLayout::eDepthStencilAttachmentOptimal:
return vk::AccessFlagBits::eDepthStencilAttachmentWrite;
case vk::ImageLayout::eShaderReadOnlyOptimal:
return vk::AccessFlagBits::eShaderRead;
default:
// No specific access implied by the layout.
return vk::AccessFlags();
}
}
// Convert a glm vec4 (defaulting to transparent black) into a
// vk::ClearColorValue by copying the four floats.
inline vk::ClearColorValue clearColor(const glm::vec4& v = glm::vec4(0)) {
    vk::ClearColorValue color;
    memcpy(&color.float32, &v, sizeof(color.float32));
    return color;
}
bool loadPipelineCacheData(std::vector<uint8_t>& outCache);
void savePipelineCacheData(const std::vector<uint8_t>& cache);
namespace gl {
using UuidSet = std::set<QUuid>;
UuidSet getUuids();
bool contextSupported(QOpenGLContext*);
} // namespace vks::util::gl
}} // namespace vks::util

View file

@ -0,0 +1,42 @@
#pragma once
#include "Allocation.h"
namespace vks {
// Encaspulates an image, the memory for that image, a view of the image,
// as well as a sampler and the image format.
//
// The sampler is not populated by the allocation code, but is provided
// for convenience and easy cleanup if it is populated.
struct Image : public Allocation
{
private:
using Parent = Allocation;
public:
vk::Image image;
vk::Extent3D extent;
vk::ImageView view;
// Optional; not populated by allocation code (see header comment above).
vk::Sampler sampler;
vk::Format format{ vk::Format::eUndefined };
// True when the image handle has been created.
operator bool() const {
return image.operator bool();
}
// Destroy the sampler/view/image (each only if present), then release the
// underlying memory via the Allocation base.
void destroy() override {
if (sampler) {
device.destroySampler(sampler);
sampler = vk::Sampler();
}
if (view) {
device.destroyImageView(view);
view = vk::ImageView();
}
if (image) {
device.destroyImage(image);
image = vk::Image();
}
Parent::destroy();
}
};
}

View file

@ -0,0 +1,173 @@
#pragma once
#include "Context.h"
#include "Shaders.h"
namespace vks {
namespace pipelines {
// Rasterization state with engine defaults: 1px lines, back-face culling.
struct PipelineRasterizationStateCreateInfo : public vk::PipelineRasterizationStateCreateInfo {
using Parent = vk::PipelineRasterizationStateCreateInfo;
PipelineRasterizationStateCreateInfo() {
lineWidth = 1.0f;
cullMode = vk::CullModeFlagBits::eBack;
}
};
// Input assembly defaulting to triangle lists.
struct PipelineInputAssemblyStateCreateInfo : public vk::PipelineInputAssemblyStateCreateInfo {
PipelineInputAssemblyStateCreateInfo() {
topology = vk::PrimitiveTopology::eTriangleList;
}
};
// Per-attachment blend state defaulting to writing all color channels.
struct PipelineColorBlendAttachmentState : public vk::PipelineColorBlendAttachmentState {
PipelineColorBlendAttachmentState() {
colorWriteMask = vks::util::fullColorWriteMask();
}
};
struct PipelineColorBlendStateCreateInfo : public vk::PipelineColorBlendStateCreateInfo {
// Default to a single color attachment state with no blending
std::vector<PipelineColorBlendAttachmentState> blendAttachmentStates{ PipelineColorBlendAttachmentState() };
// Must be called after editing blendAttachmentStates so the base struct's
// pointer/count reflect the vector's current storage.
void update() {
this->attachmentCount = (uint32_t)blendAttachmentStates.size();
this->pAttachments = blendAttachmentStates.data();
}
};
// Dynamic state defaulting to dynamic viewport and scissor.
struct PipelineDynamicStateCreateInfo : public vk::PipelineDynamicStateCreateInfo {
std::vector<vk::DynamicState> dynamicStateEnables;
PipelineDynamicStateCreateInfo() {
dynamicStateEnables = { vk::DynamicState::eViewport, vk::DynamicState::eScissor };
}
// Sync the base struct's pointer/count with the vector before pipeline creation.
void update() {
this->dynamicStateCount = (uint32_t)dynamicStateEnables.size();
this->pDynamicStates = dynamicStateEnables.data();
}
};
// Vertex input state that owns its binding/attribute description storage.
struct PipelineVertexInputStateCreateInfo : public vk::PipelineVertexInputStateCreateInfo {
std::vector<vk::VertexInputBindingDescription> bindingDescriptions;
std::vector<vk::VertexInputAttributeDescription> attributeDescriptions;
// Sync the base struct's pointers/counts with the vectors before pipeline creation.
void update() {
vertexAttributeDescriptionCount = (uint32_t)attributeDescriptions.size();
vertexBindingDescriptionCount = (uint32_t)bindingDescriptions.size();
pVertexBindingDescriptions = bindingDescriptions.data();
pVertexAttributeDescriptions = attributeDescriptions.data();
}
};
// Viewport state that owns its viewport/scissor storage. When either vector
// is empty, one viewport/scissor is still declared (count = 1, pointer null)
// on the assumption it is supplied dynamically (see
// PipelineDynamicStateCreateInfo's defaults).
struct PipelineViewportStateCreateInfo : public vk::PipelineViewportStateCreateInfo {
    std::vector<vk::Viewport> viewports;
    std::vector<vk::Rect2D> scissors;
    // Sync the base struct's pointers/counts with the vectors before use.
    void update() {
        if (viewports.empty()) {
            viewportCount = 1;
            pViewports = nullptr;
        } else {
            viewportCount = (uint32_t)viewports.size();
            pViewports = viewports.data();
        }
        if (scissors.empty()) {
            scissorCount = 1;
            // FIX: was the literal 0; use nullptr for pointer members,
            // matching the pViewports branch above.
            pScissors = nullptr;
        } else {
            scissorCount = (uint32_t)scissors.size();
            pScissors = scissors.data();
        }
    }
};
// Depth-stencil state; by default enables depth test/write with
// less-or-equal comparison. Pass false for a fully-disabled state.
struct PipelineDepthStencilStateCreateInfo : public vk::PipelineDepthStencilStateCreateInfo {
PipelineDepthStencilStateCreateInfo(bool depthEnable = true) {
if (depthEnable) {
depthTestEnable = VK_TRUE;
depthWriteEnable = VK_TRUE;
depthCompareOp = vk::CompareOp::eLessOrEqual;
}
}
};
// Fluent builder for vk::GraphicsPipelineCreateInfo. Owns the sub-state
// structs and wires their addresses into pipelineCreateInfo; renderPass and
// layout are reference members aliasing pipelineCreateInfo's fields, so
// assigning to them updates the create info directly.
struct GraphicsPipelineBuilder {
private:
// Point pipelineCreateInfo at the member state structs (their addresses
// are stable for the lifetime of the builder).
void init() {
pipelineCreateInfo.pRasterizationState = &rasterizationState;
pipelineCreateInfo.pInputAssemblyState = &inputAssemblyState;
pipelineCreateInfo.pColorBlendState = &colorBlendState;
pipelineCreateInfo.pMultisampleState = &multisampleState;
pipelineCreateInfo.pViewportState = &viewportState;
pipelineCreateInfo.pDepthStencilState = &depthStencilState;
pipelineCreateInfo.pDynamicState = &dynamicState;
pipelineCreateInfo.pVertexInputState = &vertexInputState;
}
public:
GraphicsPipelineBuilder(const vk::Device& device, const vk::PipelineLayout layout, const vk::RenderPass& renderPass) :
device(device) {
pipelineCreateInfo.layout = layout;
pipelineCreateInfo.renderPass = renderPass;
init();
}
// NOTE(review): the copy constructor carries over only device, layout and
// renderPass; all other accumulated state (shader stages, blend/vertex
// setup) is reset to defaults — confirm this is intended.
GraphicsPipelineBuilder(const GraphicsPipelineBuilder& other) : GraphicsPipelineBuilder(other.device, other.layout, other.renderPass) {}
GraphicsPipelineBuilder& operator=(const GraphicsPipelineBuilder& other) = delete;
// Shader modules created via loadShader are owned by the builder.
~GraphicsPipelineBuilder() {
destroyShaderModules();
}
const vk::Device& device;
vk::PipelineCache pipelineCache;
vk::RenderPass& renderPass { pipelineCreateInfo.renderPass };
vk::PipelineLayout& layout { pipelineCreateInfo.layout };
PipelineInputAssemblyStateCreateInfo inputAssemblyState;
PipelineRasterizationStateCreateInfo rasterizationState;
vk::PipelineMultisampleStateCreateInfo multisampleState;
PipelineDepthStencilStateCreateInfo depthStencilState;
PipelineViewportStateCreateInfo viewportState;
PipelineDynamicStateCreateInfo dynamicState;
PipelineColorBlendStateCreateInfo colorBlendState;
PipelineVertexInputStateCreateInfo vertexInputState;
std::vector<vk::PipelineShaderStageCreateInfo> shaderStages;
vk::GraphicsPipelineCreateInfo pipelineCreateInfo;
// Flush all vector-backed sub-states into their base structs.
void update() {
pipelineCreateInfo.stageCount = static_cast<uint32_t>(shaderStages.size());
pipelineCreateInfo.pStages = shaderStages.data();
dynamicState.update();
colorBlendState.update();
vertexInputState.update();
viewportState.update();
}
void destroyShaderModules() {
for (const auto shaderStage : shaderStages) {
device.destroyShaderModule(shaderStage.module);
}
shaderStages.clear();
}
// Load a SPIR-V shader
vk::PipelineShaderStageCreateInfo& loadShader(const std::string& fileName, vk::ShaderStageFlagBits stage, const char* entryPoint = "main") {
vk::PipelineShaderStageCreateInfo shaderStage = vks::shaders::loadShader(device, fileName, stage, entryPoint);
shaderStages.push_back(shaderStage);
return shaderStages.back();
}
// Build the pipeline using an explicit cache.
vk::Pipeline create(const vk::PipelineCache& cache) {
update();
return device.createGraphicsPipeline(cache, pipelineCreateInfo);
}
// Build the pipeline using the builder's pipelineCache member.
vk::Pipeline create() {
return create(pipelineCache);
}
};
}
} // namespace vks::pipelines

View file

@ -0,0 +1,147 @@
#pragma once
#include "Config.h"
namespace vks { namespace renderpasses {
// Fluent builder over vk::AttachmentDescription: each with*() setter assigns
// one field and returns *this so a description can be composed in a single
// chained expression.
struct AttachmentDescription : public vk::AttachmentDescription {
    AttachmentDescription& withFlags(vk::AttachmentDescriptionFlags flags) {
        this->flags = flags; return *this;
    }
    AttachmentDescription& withFormat(vk::Format format) {
        this->format = format; return *this;
    }
    AttachmentDescription& withInitialLayout(vk::ImageLayout layout) {
        this->initialLayout = layout; return *this;
    }
    AttachmentDescription& withFinalLayout(vk::ImageLayout layout) {
        this->finalLayout = layout; return *this;
    }
    AttachmentDescription& withSampleCount(vk::SampleCountFlagBits samples) {
        this->samples = samples; return *this;
    }
    AttachmentDescription& withLoadOp(vk::AttachmentLoadOp loadOp) {
        this->loadOp = loadOp; return *this;
    }
    AttachmentDescription& withStoreOp(vk::AttachmentStoreOp storeOp) {
        this->storeOp = storeOp; return *this;
    }
    // Sets the stencil load op (fixes the original misspelled method name).
    AttachmentDescription& withStencilLoadOp(vk::AttachmentLoadOp loadOp) {
        this->stencilLoadOp = loadOp; return *this;
    }
    // Backward-compatible alias for the original misspelled name; prefer
    // withStencilLoadOp().
    AttachmentDescription& withLStenciloadOp(vk::AttachmentLoadOp loadOp) {
        return withStencilLoadOp(loadOp);
    }
    AttachmentDescription& withStencilStoreOp(vk::AttachmentStoreOp storeOp) {
        this->stencilStoreOp = storeOp; return *this;
    }
};
struct SubpassDescription : public vk::SubpassDescription {
std::vector<vk::AttachmentReference> colorAttachments;
std::vector<vk::AttachmentReference> inputAttachments;
std::vector<vk::AttachmentReference> resolveAttachments;
vk::AttachmentReference depthStencilAttachment;
std::vector<uint32_t> preserveAttachments;
void update() {
this->colorAttachmentCount = (uint32_t)colorAttachments.size();
this->pColorAttachments = colorAttachments.data();
this->inputAttachmentCount = (uint32_t)inputAttachments.size();
this->pInputAttachments = inputAttachments.data();
this->pResolveAttachments = resolveAttachments.data();
this->pDepthStencilAttachment = &depthStencilAttachment;
this->preserveAttachmentCount = (uint32_t)preserveAttachments.size();
this->pPreserveAttachments = preserveAttachments.data();
}
};
// Collects attachments, subpass dependencies and subpass descriptions, then
// assembles them into a vk::RenderPass in a single build() call.
struct RenderPassBuilder {
    std::vector<vk::AttachmentDescription> attachments;
    std::vector<vk::SubpassDependency> subpassDependencies;
    std::vector<SubpassDescription> subpasses;

    // Appends an attachment and returns its index, suitable for use in
    // vk::AttachmentReference::attachment.
    size_t addAttachment(const vk::AttachmentDescription& attachment) {
        attachments.push_back(attachment);
        return attachments.size() - 1;
    }

    // Finalizes every subpass (syncing its internal pointers) and creates the
    // render pass on the given device.  Caller owns the returned handle.
    vk::RenderPass build(const vk::Device& device) {
        for (auto& subpassDescription : subpasses) {
            subpassDescription.update();
        }
        vk::RenderPassCreateInfo createInfo;
        createInfo.attachmentCount = static_cast<uint32_t>(attachments.size());
        createInfo.pAttachments = attachments.data();
        createInfo.subpassCount = static_cast<uint32_t>(subpasses.size());
        createInfo.pSubpasses = subpasses.data();
        createInfo.dependencyCount = static_cast<uint32_t>(subpassDependencies.size());
        createInfo.pDependencies = subpassDependencies.data();
        return device.createRenderPass(createInfo);
    }
};
#if 0
subpass.pipelineBindPoint = vk::PipelineBindPoint::eGraphics;
attachments.resize(colorFormats.size());
colorAttachmentReferences.resize(attachments.size());
// Color attachment
for (uint32_t i = 0; i < attachments.size(); ++i) {
attachments[i].format = colorFormats[i];
attachments[i].loadOp = vk::AttachmentLoadOp::eClear;
attachments[i].storeOp = colorFinalLayout == vk::ImageLayout::eColorAttachmentOptimal ? vk::AttachmentStoreOp::eDontCare : vk::AttachmentStoreOp::eStore;
attachments[i].initialLayout = vk::ImageLayout::eUndefined;
attachments[i].finalLayout = colorFinalLayout;
vk::AttachmentReference& attachmentReference = colorAttachmentReferences[i];
attachmentReference.attachment = i;
attachmentReference.layout = vk::ImageLayout::eColorAttachmentOptimal;
subpass.colorAttachmentCount = (uint32_t)colorAttachmentReferences.size();
subpass.pColorAttachments = colorAttachmentReferences.data();
}
// Do we have a depth format?
vk::AttachmentReference depthAttachmentReference;
if (depthFormat != vk::Format::eUndefined) {
vk::AttachmentDescription depthAttachment;
depthAttachment.format = depthFormat;
depthAttachment.loadOp = vk::AttachmentLoadOp::eClear;
// We might be using the depth attacment for something, so preserve it if it's final layout is not undefined
depthAttachment.storeOp =
depthFinalLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal ? vk::AttachmentStoreOp::eDontCare : vk::AttachmentStoreOp::eStore;
depthAttachment.initialLayout = vk::ImageLayout::eUndefined;
depthAttachment.finalLayout = depthFinalLayout;
attachments.push_back(depthAttachment);
depthAttachmentReference.attachment = (uint32_t)attachments.size() - 1;
depthAttachmentReference.layout = vk::ImageLayout::eDepthStencilAttachmentOptimal;
subpass.pDepthStencilAttachment = &depthAttachmentReference;
}
{
if ((colorFinalLayout != vk::ImageLayout::eColorAttachmentOptimal) && (colorFinalLayout != vk::ImageLayout::eUndefined)) {
// Implicit transition
vk::SubpassDependency dependency;
dependency.srcSubpass = 0;
dependency.srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite;
dependency.srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput;
dependency.dstSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstAccessMask = vks::util::accessFlagsForLayout(colorFinalLayout);
dependency.dstStageMask = vk::PipelineStageFlagBits::eBottomOfPipe;
subpassDependencies.push_back(dependency);
}
if ((depthFinalLayout != vk::ImageLayout::eColorAttachmentOptimal) && (depthFinalLayout != vk::ImageLayout::eUndefined)) {
// Implicit transition
vk::SubpassDependency dependency;
dependency.srcSubpass = 0;
dependency.srcAccessMask = vk::AccessFlagBits::eDepthStencilAttachmentWrite;
dependency.srcStageMask = vk::PipelineStageFlagBits::eBottomOfPipe;
dependency.dstSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstAccessMask = vks::util::accessFlagsForLayout(depthFinalLayout);
dependency.dstStageMask = vk::PipelineStageFlagBits::eBottomOfPipe;
subpassDependencies.push_back(dependency);
}
#endif
}} // namespace vks::renderpasses

View file

@ -0,0 +1,279 @@
/*
* Class wrapping access to the swap chain
*
* A swap chain is a collection of framebuffers used for rendering
* The swap chain images can then be presented to the windowing system
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
*/
#pragma once
#include "Config.h"
#include <mutex>
namespace vks {
// One presentable entry of the swapchain: the image itself, the view used to
// build framebuffers for it, and the fence guarding the most recent
// submission that rendered into it (null when no submission is in flight).
struct SwapchainImage {
vk::Image image;
vk::ImageView view;
vk::Fence fence;
};
// Wraps a vk::SwapchainKHR together with its surface, per-image state and
// present bookkeeping.  Device/queue handles are borrowed from the singleton
// vks::Context.  Typical usage: setSurface() -> create() -> per frame
// acquireNextImage() / getSubmitFence() / queuePresent() -> destroy().
struct Swapchain {
vk::SurfaceKHR surface;
vk::SwapchainKHR swapchain;
// Pre-wired in the constructor to point at `swapchain` and `currentImage`,
// so queuePresent() always presents the most recently acquired image.
vk::PresentInfoKHR presentInfo;
vk::Extent2D extent;
const vks::Context& context{ vks::Context::get() };
const vk::PhysicalDevice& physicalDevice{ context.physicalDevice };
const vk::Device& device{ context.device };
const uint32_t& graphicsQueueIndex{ context.queueIndices.graphics };
const vk::Queue& queue{ context.queue };
std::vector<SwapchainImage> images;
vk::Format colorFormat;
vk::ColorSpaceKHR colorSpace;
uint32_t imageCount{ 0 };
// Index of the image returned by the last acquireNextImage() call.
uint32_t currentImage{ 0 };
Swapchain() {
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = &swapchain;
presentInfo.pImageIndices = &currentImage;
}
// Drains both the present queue and the whole device.
void waitIdle() {
queue.waitIdle();
device.waitIdle();
}
void setSurface(const vk::SurfaceKHR& newSurface) {
surface = newSurface;
}
// (Re)creates the swapchain for the current surface.  `size` is both input
// (requested extent when the surface has no fixed size) and output (the
// extent actually used).  Safe to call again for resize: the old swapchain
// is passed as oldSwapchain and then destroyed.
void create(vk::Extent2D& size, bool vsync = false) {
if (!physicalDevice || !device || !surface) {
throw std::runtime_error("Initialize the physicalDevice, device, and queue members");
}
// NOTE(review): the color format/space is selected only once per process;
// re-creating against a different surface reuses the cached choice —
// confirm this is acceptable.
static std::once_flag once;
std::call_once(once, [&] {
// Get list of supported surface formats
std::vector<vk::SurfaceFormatKHR> surfaceFormats = physicalDevice.getSurfaceFormatsKHR(surface);
auto formatCount = surfaceFormats.size();
physicalDevice.getSurfaceSupportKHR(graphicsQueueIndex, surface);
// If the surface format list only includes one entry with vk::Format::eUndefined,
// there is no preferred format, so we assume vk::Format::eB8G8R8A8Unorm
if ((formatCount == 1) && (surfaceFormats[0].format == vk::Format::eUndefined)) {
colorFormat = vk::Format::eB8G8R8A8Unorm;
} else {
// Always select the first available color format
// If you need a specific format (e.g. SRGB) you'd need to
// iterate over the list of available surface formats and
// check for its presence
colorFormat = surfaceFormats[0].format;
}
colorSpace = surfaceFormats[0].colorSpace;
});
vk::SwapchainKHR oldSwapchain = swapchain;
currentImage = 0;
// Get physical device surface properties and formats
vk::SurfaceCapabilitiesKHR surfCaps = physicalDevice.getSurfaceCapabilitiesKHR(surface);
// Get available present modes
std::vector<vk::PresentModeKHR> presentModes = physicalDevice.getSurfacePresentModesKHR(surface);
auto presentModeCount = presentModes.size();
// width and height are either both -1, or both not -1.
// (The spec's "undefined" sentinel is 0xFFFFFFFF; comparing against -1
// works via the usual unsigned conversion.)
if (surfCaps.currentExtent.width == -1) {
// If the surface size is undefined, the size is set to
// the size of the images requested.
extent = size;
} else {
// If the surface size is defined, the swap chain size must match
extent = surfCaps.currentExtent;
size = surfCaps.currentExtent;
}
// Prefer mailbox mode if present, it's the lowest latency non-tearing present mode
vk::PresentModeKHR swapchainPresentMode = vk::PresentModeKHR::eFifo;
if (!vsync) {
for (size_t i = 0; i < presentModeCount; i++) {
if (presentModes[i] == vk::PresentModeKHR::eMailbox) {
swapchainPresentMode = vk::PresentModeKHR::eMailbox;
break;
}
if ((swapchainPresentMode != vk::PresentModeKHR::eMailbox) && (presentModes[i] == vk::PresentModeKHR::eImmediate)) {
swapchainPresentMode = vk::PresentModeKHR::eImmediate;
}
}
}
// Determine the number of images
uint32_t desiredNumberOfSwapchainImages = surfCaps.minImageCount + 1;
if ((surfCaps.maxImageCount > 0) && (desiredNumberOfSwapchainImages > surfCaps.maxImageCount)) {
desiredNumberOfSwapchainImages = surfCaps.maxImageCount;
}
vk::SurfaceTransformFlagBitsKHR preTransform;
if (surfCaps.supportedTransforms & vk::SurfaceTransformFlagBitsKHR::eIdentity) {
preTransform = vk::SurfaceTransformFlagBitsKHR::eIdentity;
} else {
preTransform = surfCaps.currentTransform;
}
//auto imageFormat = context.physicalDevice.getImageFormatProperties(colorFormat, vk::ImageType::e2D, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eColorAttachment, vk::ImageCreateFlags());
vk::SwapchainCreateInfoKHR swapchainCI;
swapchainCI.surface = surface;
swapchainCI.minImageCount = desiredNumberOfSwapchainImages;
swapchainCI.imageFormat = colorFormat;
swapchainCI.imageColorSpace = colorSpace;
swapchainCI.imageExtent = extent;
// eTransferDst allows blitting/clearing into swapchain images directly.
swapchainCI.imageUsage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferDst;
swapchainCI.preTransform = preTransform;
swapchainCI.imageArrayLayers = 1;
swapchainCI.imageSharingMode = vk::SharingMode::eExclusive;
swapchainCI.queueFamilyIndexCount = 0;
swapchainCI.pQueueFamilyIndices = NULL;
swapchainCI.presentMode = swapchainPresentMode;
swapchainCI.oldSwapchain = oldSwapchain;
swapchainCI.clipped = true;
swapchainCI.compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque;
swapchain = device.createSwapchainKHR(swapchainCI);
// If an existing swap chain is re-created, destroy the old swap chain
// This also cleans up all the presentable images
if (oldSwapchain) {
for (uint32_t i = 0; i < imageCount; i++) {
device.destroyImageView(images[i].view);
}
device.destroySwapchainKHR(oldSwapchain);
}
vk::ImageViewCreateInfo colorAttachmentView;
colorAttachmentView.format = colorFormat;
colorAttachmentView.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
colorAttachmentView.subresourceRange.levelCount = 1;
colorAttachmentView.subresourceRange.layerCount = 1;
colorAttachmentView.viewType = vk::ImageViewType::e2D;
// Get the swap chain images
auto swapChainImages = device.getSwapchainImagesKHR(swapchain);
imageCount = (uint32_t)swapChainImages.size();
// Get the swap chain buffers containing the image and imageview
images.resize(imageCount);
for (uint32_t i = 0; i < imageCount; i++) {
images[i].image = swapChainImages[i];
colorAttachmentView.image = swapChainImages[i];
images[i].view = device.createImageView(colorAttachmentView);
images[i].fence = vk::Fence();
}
}
// Creates one framebuffer per swapchain image from the given template.
// Attachment slot 0 must be left null by the caller; it is replaced with
// each image's color view in turn.
std::vector<vk::Framebuffer> createFramebuffers(vk::FramebufferCreateInfo framebufferCreateInfo) {
// Verify that the first attachment is null
assert(framebufferCreateInfo.pAttachments[0] == vk::ImageView());
std::vector<vk::ImageView> attachments;
attachments.resize(framebufferCreateInfo.attachmentCount);
for (size_t i = 0; i < framebufferCreateInfo.attachmentCount; ++i) {
attachments[i] = framebufferCreateInfo.pAttachments[i];
}
framebufferCreateInfo.pAttachments = attachments.data();
std::vector<vk::Framebuffer> framebuffers;
framebuffers.resize(imageCount);
for (uint32_t i = 0; i < imageCount; i++) {
attachments[0] = images[i].view;
framebuffers[i] = device.createFramebuffer(framebufferCreateInfo);
}
return framebuffers;
}
// Acquires the next image in the swap chain
// Updates currentImage on success; eSuboptimalKHR is tolerated.
// NOTE(review): throws std::error_code, which is not derived from
// std::exception — consider std::system_error so generic handlers catch it.
vk::ResultValue<uint32_t> acquireNextImage(const vk::Semaphore& presentCompleteSemaphore, const vk::Fence& fence = vk::Fence()) {
auto resultValue = device.acquireNextImageKHR(swapchain, UINT64_MAX, presentCompleteSemaphore, fence);
vk::Result result = resultValue.result;
if (result != vk::Result::eSuccess && result != vk::Result::eSuboptimalKHR) {
throw std::error_code(result);
}
currentImage = resultValue.value;
return resultValue;
}
// Forget (without destroying) the fence recorded for image `index`.
void clearSubmitFence(uint32_t index) {
images[index].fence = vk::Fence();
}
// Waits for any previous submission against the current image, optionally
// destroying its old fence, and installs a fresh fence for the caller to
// signal on submit.
// NOTE(review): if waitForFences ever returns something other than
// eSuccess, this loop spins without clearing the fence — confirm intended.
vk::Fence getSubmitFence(bool destroy = false) {
auto& image = images[currentImage];
while (image.fence) {
vk::Result fenceRes = device.waitForFences(image.fence, VK_TRUE, UINT64_MAX);
if (fenceRes == vk::Result::eSuccess) {
if (destroy) {
device.destroyFence(image.fence);
}
image.fence = vk::Fence();
}
}
image.fence = device.createFence({});
return image.fence;
}
// Present the current image to the queue
vk::Result queuePresent(vk::Semaphore waitSemaphore) {
presentInfo.waitSemaphoreCount = waitSemaphore ? 1 : 0;
presentInfo.pWaitSemaphores = &waitSemaphore;
return queue.presentKHR(presentInfo);
}
// Free all Vulkan resources used by the swap chain
void destroy() {
for (uint32_t i = 0; i < imageCount; i++) {
device.destroyImageView(images[i].view);
}
device.destroySwapchainKHR(swapchain);
}
private:
// Returns the first queue family matching `flags` that (when a surface is
// set) also supports presenting to it; throws if none exists.
uint32_t findQueue(const vk::QueueFlags& flags) const {
std::vector<vk::QueueFamilyProperties> queueProps = physicalDevice.getQueueFamilyProperties();
size_t queueCount = queueProps.size();
for (uint32_t i = 0; i < queueCount; i++) {
if (queueProps[i].queueFlags & flags) {
if (surface && !physicalDevice.getSurfaceSupportKHR(i, surface)) {
continue;
}
return i;
}
}
throw std::runtime_error("No queue matches the flags " + vk::to_string(flags));
}
};
using SwapchainPtr = std::shared_ptr<Swapchain>;
}
#if 0
#ifdef __ANDROID__
vk::AndroidSurfaceCreateInfoKHR surfaceCreateInfo;
surfaceCreateInfo.window = window;
surface = context.instance.createAndroidSurfaceKHR(surfaceCreateInfo);
#else
surface = glfw::createWindowSurface(context.instance, window);
#endif
#endif

View file

@ -0,0 +1,572 @@
#pragma once
#include "Config.h"
#include "Context.h"
#include "Buffer.h"
#include "Image.h"
namespace vks { namespace texture {
/** @brief Vulkan texture base class
 *
 * Extends vks::Image with sampling-related state (target layout, mip and
 * layer counts) and the vk::DescriptorImageInfo used to bind the texture in
 * descriptor sets.
 */
class Texture : public vks::Image {
    using Parent = vks::Image;
public:
    vk::Device device;
    // Members are brace-initialized (the originals were left indeterminate,
    // which was UB if the texture was destroyed or described before loading).
    vk::ImageLayout imageLayout{ vk::ImageLayout::eUndefined };
    uint32_t mipLevels{ 1 };
    uint32_t layerCount{ 1 };
    vk::DescriptorImageInfo descriptor;

    /** @brief Replace the wrapped image, releasing any previously held resources */
    Texture& operator=(const vks::Image& image) {
        // Self-assignment guard: destroying first would free the very
        // resources we are about to copy from.
        if (static_cast<const vks::Image*>(this) != &image) {
            destroy();
            static_cast<vks::Image&>(*this) = image;
        }
        return *this;
    }

    /** @brief Update image descriptor from current sampler, view and image layout */
    void updateDescriptor() {
        descriptor.sampler = sampler;
        descriptor.imageView = view;
        descriptor.imageLayout = imageLayout;
    }

    /** @brief Release all Vulkan resources held by this texture */
    void destroy() {
        Parent::destroy();
    }
};
/** @brief 2D texture
 *
 * Loads a single-layer 2D image (all mip levels) from a gli-supported file
 * and uploads it to a device-local, optimally tiled image via the context's
 * staging helpers, then creates the matching sampler and view.
 */
class Texture2D : public Texture {
using Parent = Texture;
public:
/**
* Load a 2D texture including all mip levels
*
* @param filename File to load (supports .ktx and .dds)
* @param format Vulkan format of the image data stored in the file
* @param device Vulkan device to create the texture on
* @param copyQueue Queue used for the texture staging copy commands (must support transfer)
* @param (Optional) imageUsageFlags Usage flags for the texture's image (defaults to VK_IMAGE_USAGE_SAMPLED_BIT)
* @param (Optional) imageLayout Usage layout for the texture (defaults VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
* @param (Optional) forceLinear Force linear tiling (not advised, defaults to false)
*
*/
void loadFromFile(
const vks::Context& context,
const std::string& filename,
vk::Format format,
vk::ImageUsageFlags imageUsageFlags = vk::ImageUsageFlagBits::eSampled,
vk::ImageLayout imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
bool forceLinear = false)
{
this->imageLayout = imageLayout;
// Read the whole file into memory, then let gli parse the container format.
std::shared_ptr<gli::texture2d> tex2Dptr;
vks::file::withBinaryFileContents(filename, [&](size_t size, const void* data) {
tex2Dptr = std::make_shared<gli::texture2d>(gli::load((const char*)data, size));
});
const auto& tex2D = *tex2Dptr;
assert(!tex2D.empty());
device = context.device;
extent.width = static_cast<uint32_t>(tex2D[0].extent().x);
extent.height = static_cast<uint32_t>(tex2D[0].extent().y);
extent.depth = 1;
mipLevels = static_cast<uint32_t>(tex2D.levels());
layerCount = 1;
// Create optimal tiled target image
vk::ImageCreateInfo imageCreateInfo;
imageCreateInfo.imageType = vk::ImageType::e2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = mipLevels;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.extent = extent;
imageCreateInfo.usage = imageUsageFlags | vk::ImageUsageFlagBits::eTransferDst;
// Active path: the context helper stages the gli payload and copies it
// into a freshly created device-local image in one step.
#if 1
((vks::Image&)(*this)) = context.stageToDeviceImage(imageCreateInfo, vk::MemoryPropertyFlagBits::eDeviceLocal, tex2D);
#else
// Disabled legacy path: manual staging buffer + per-mip copy regions.
((vks::Image&)*this) = context.createImage(imageCreateInfo);
auto stagingBuffer = context.createBuffer(vk::BufferUsageFlagBits::eTransferSrc, tex2D);
// Setup buffer copy regions for each layer including all of its mip levels
std::vector<vk::BufferImageCopy> bufferCopyRegions;
size_t offset = 0;
vk::BufferImageCopy bufferCopyRegion;
bufferCopyRegion.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.depth = 1;
for (uint32_t level = 0; level < mipLevels; level++) {
auto image = tex2D[level];
auto imageExtent = image.extent();
bufferCopyRegion.imageSubresource.mipLevel = level;
// NOTE(review): baseArrayLayer should be 0 for a non-array texture
// (disabled code).
bufferCopyRegion.imageSubresource.baseArrayLayer = 1;
bufferCopyRegion.imageExtent.width = static_cast<uint32_t>(imageExtent.x);
bufferCopyRegion.imageExtent.height = static_cast<uint32_t>(imageExtent.y);
bufferCopyRegion.bufferOffset = offset;
bufferCopyRegions.push_back(bufferCopyRegion);
// Increase offset into staging buffer for next level / face
offset += image.size();
}
vk::ImageSubresourceRange subresourceRange;
subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
subresourceRange.levelCount = mipLevels;
subresourceRange.layerCount = layerCount;
// Use a separate command buffer for texture loading
context.withPrimaryCommandBuffer([&](const vk::CommandBuffer& copyCmd) {
// Image barrier for optimal image (target)
// Set initial layout for all array layers (faces) of the optimal (target) tiled texture
context.setImageLayout(copyCmd, image, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, subresourceRange);
// Copy the layers and mip levels from the staging buffer to the optimal tiled image
copyCmd.copyBufferToImage(stagingBuffer.buffer, image, vk::ImageLayout::eTransferDstOptimal, bufferCopyRegions);
// Change texture image layout to shader read after all faces have been copied
context.setImageLayout(copyCmd, image, vk::ImageLayout::eTransferDstOptimal, imageLayout, subresourceRange);
});
// Clean up staging resources
stagingBuffer.destroy();
#endif
// Create sampler
vk::SamplerCreateInfo samplerCreateInfo;
samplerCreateInfo.magFilter = vk::Filter::eLinear;
samplerCreateInfo.minFilter = vk::Filter::eLinear;
samplerCreateInfo.mipmapMode = vk::SamplerMipmapMode::eLinear;
// Max level-of-detail should match mip level count
samplerCreateInfo.maxLod = (float)mipLevels;
// Only enable anisotropic filtering if enabled on the device
samplerCreateInfo.maxAnisotropy = context.deviceFeatures.samplerAnisotropy ? context.deviceProperties.limits.maxSamplerAnisotropy : 1.0f;
samplerCreateInfo.anisotropyEnable = context.deviceFeatures.samplerAnisotropy;
samplerCreateInfo.borderColor = vk::BorderColor::eFloatOpaqueWhite;
sampler = device.createSampler(samplerCreateInfo);
// Create image view
vk::ImageViewCreateInfo viewCreateInfo;
viewCreateInfo.viewType = vk::ImageViewType::e2D;
viewCreateInfo.image = image;
viewCreateInfo.format = format;
viewCreateInfo.subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, layerCount };
view = context.device.createImageView(viewCreateInfo);
// Update descriptor image info member that can be used for setting up descriptor sets
updateDescriptor();
}
// Disabled legacy code kept for reference: raw Vulkan (non-hpp) upload path.
#if 0
/**
* Creates a 2D texture from a buffer
*
* @param buffer Buffer containing texture data to upload
* @param bufferSize Size of the buffer in machine units
* @param width Width of the texture to create
* @param height Height of the texture to create
* @param format Vulkan format of the image data stored in the file
* @param device Vulkan device to create the texture on
* @param copyQueue Queue used for the texture staging copy commands (must support transfer)
* @param (Optional) filter Texture filtering for the sampler (defaults to VK_FILTER_LINEAR)
* @param (Optional) imageUsageFlags Usage flags for the texture's image (defaults to VK_IMAGE_USAGE_SAMPLED_BIT)
* @param (Optional) imageLayout Usage layout for the texture (defaults VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
*/
void fromBuffer(
void* buffer,
VkDeviceSize bufferSize,
VkFormat format,
uint32_t width,
uint32_t height,
VkQueue copyQueue,
VkFilter filter = VK_FILTER_LINEAR,
VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT,
VkImageLayout imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
{
assert(buffer);
// NOTE(review): self-assignment — the device should come from a device
// wrapper argument (disabled code).
this->device = device;
width = width;
height = height;
mipLevels = 1;
VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo();
VkMemoryRequirements memReqs;
// Use a separate command buffer for texture loading
VkCommandBuffer copyCmd = device->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
// Create a host-visible staging buffer that contains the raw image data
VkBuffer stagingBuffer;
VkDeviceMemory stagingMemory;
VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo();
bufferCreateInfo.size = bufferSize;
// This buffer is used as a transfer source for the buffer copy
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_CHECK_RESULT(vkCreateBuffer(device->logicalDevice, &bufferCreateInfo, nullptr, &stagingBuffer));
// Get memory requirements for the staging buffer (alignment, memory type bits)
vkGetBufferMemoryRequirements(device->logicalDevice, stagingBuffer, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
// Get memory type index for a host visible buffer
memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &stagingMemory));
VK_CHECK_RESULT(vkBindBufferMemory(device->logicalDevice, stagingBuffer, stagingMemory, 0));
// Copy texture data into staging buffer
uint8_t *data;
VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, stagingMemory, 0, memReqs.size, 0, (void **)&data));
memcpy(data, buffer, bufferSize);
vkUnmapMemory(device->logicalDevice, stagingMemory);
VkBufferImageCopy bufferCopyRegion = {};
bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
bufferCopyRegion.imageSubresource.mipLevel = 0;
bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
bufferCopyRegion.imageSubresource.layerCount = 1;
bufferCopyRegion.imageExtent.width = width;
bufferCopyRegion.imageExtent.height = height;
bufferCopyRegion.imageExtent.depth = 1;
bufferCopyRegion.bufferOffset = 0;
// Create optimal tiled target image
VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = format;
imageCreateInfo.mipLevels = mipLevels;
imageCreateInfo.arrayLayers = 1;
imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.extent = { width, height, 1 };
imageCreateInfo.usage = imageUsageFlags;
// Ensure that the TRANSFER_DST bit is set for staging
if (!(imageCreateInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT))
{
imageCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
}
VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &image));
vkGetImageMemoryRequirements(device->logicalDevice, image, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &deviceMemory));
VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, image, deviceMemory, 0));
VkImageSubresourceRange subresourceRange = {};
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseMipLevel = 0;
subresourceRange.levelCount = mipLevels;
subresourceRange.layerCount = 1;
// Image barrier for optimal image (target)
// Optimal image will be used as destination for the copy
vks::tools::setImageLayout(
copyCmd,
image,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
subresourceRange);
// Copy mip levels from staging buffer
vkCmdCopyBufferToImage(
copyCmd,
stagingBuffer,
image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&bufferCopyRegion
);
// Change texture image layout to shader read after all mip levels have been copied
this->imageLayout = imageLayout;
vks::tools::setImageLayout(
copyCmd,
image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
imageLayout,
subresourceRange);
device->flushCommandBuffer(copyCmd, copyQueue);
// Clean up staging resources
vkFreeMemory(device->logicalDevice, stagingMemory, nullptr);
vkDestroyBuffer(device->logicalDevice, stagingBuffer, nullptr);
// Create sampler
VkSamplerCreateInfo samplerCreateInfo = {};
samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerCreateInfo.magFilter = filter;
samplerCreateInfo.minFilter = filter;
samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerCreateInfo.mipLodBias = 0.0f;
samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER;
samplerCreateInfo.minLod = 0.0f;
samplerCreateInfo.maxLod = 0.0f;
samplerCreateInfo.maxAnisotropy = 1.0f;
VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &samplerCreateInfo, nullptr, &sampler));
// Create image view
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.pNext = NULL;
viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewCreateInfo.format = format;
viewCreateInfo.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
viewCreateInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
viewCreateInfo.subresourceRange.levelCount = 1;
viewCreateInfo.image = image;
VK_CHECK_RESULT(vkCreateImageView(device->logicalDevice, &viewCreateInfo, nullptr, &view));
// Update descriptor image info member that can be used for setting up descriptor sets
updateDescriptor();
}
#endif
};
/** @brief 2D array texture
 *
 * Loads every layer and mip level of a 2D texture array from a gli-supported
 * file, uploads it through a staging buffer to an optimally tiled,
 * device-local image, and creates the matching sampler and array view.
 */
class Texture2DArray : public Texture {
public:
    /**
     * Load a 2D texture array including all mip levels
     *
     * @param context Vulkan context providing the device and staging helpers
     * @param filename File to load (supports .ktx and .dds)
     * @param format Vulkan format of the image data stored in the file
     * @param (Optional) imageUsageFlags Usage flags for the texture's image (defaults to VK_IMAGE_USAGE_SAMPLED_BIT)
     * @param (Optional) imageLayout Usage layout for the texture (defaults VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
     */
    void loadFromFile(
        const vks::Context& context,
        std::string filename,
        vk::Format format,
        vk::ImageUsageFlags imageUsageFlags = vk::ImageUsageFlagBits::eSampled,
        vk::ImageLayout imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal) {
        // Fix: the original read `this->device = device;`, a no-op
        // self-assignment; take the device from the context, matching
        // Texture2D::loadFromFile.
        this->device = context.device;
        // Fix: record the target layout so updateDescriptor() publishes a
        // valid layout (the member was previously never assigned here).
        this->imageLayout = imageLayout;
        // Read the whole file into memory and let gli parse the container.
        std::shared_ptr<gli::texture2d_array> texPtr;
        vks::file::withBinaryFileContents(filename, [&](size_t size, const void* data) {
            texPtr = std::make_shared<gli::texture2d_array>(gli::load((const char*)data, size));
        });
        const gli::texture2d_array& tex2DArray = *texPtr;
        extent.width = static_cast<uint32_t>(tex2DArray.extent().x);
        extent.height = static_cast<uint32_t>(tex2DArray.extent().y);
        extent.depth = 1;
        layerCount = static_cast<uint32_t>(tex2DArray.layers());
        mipLevels = static_cast<uint32_t>(tex2DArray.levels());
        // Stage the full gli payload into a host-visible buffer.
        auto stagingBuffer = context.createStagingBuffer(tex2DArray);
        // Setup buffer copy regions for each layer including all of its mip levels
        std::vector<vk::BufferImageCopy> bufferCopyRegions;
        size_t offset = 0;
        vk::BufferImageCopy bufferCopyRegion;
        bufferCopyRegion.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
        bufferCopyRegion.imageSubresource.layerCount = 1;
        bufferCopyRegion.imageExtent.depth = 1;
        for (uint32_t layer = 0; layer < layerCount; layer++) {
            for (uint32_t level = 0; level < mipLevels; level++) {
                auto image = tex2DArray[layer][level];
                auto imageExtent = image.extent();
                bufferCopyRegion.imageSubresource.mipLevel = level;
                bufferCopyRegion.imageSubresource.baseArrayLayer = layer;
                bufferCopyRegion.imageExtent.width = static_cast<uint32_t>(imageExtent.x);
                bufferCopyRegion.imageExtent.height = static_cast<uint32_t>(imageExtent.y);
                bufferCopyRegion.bufferOffset = offset;
                bufferCopyRegions.push_back(bufferCopyRegion);
                // Increase offset into staging buffer for next level / face
                offset += image.size();
            }
        }
        // Create optimal tiled target image
        vk::ImageCreateInfo imageCreateInfo;
        imageCreateInfo.imageType = vk::ImageType::e2D;
        imageCreateInfo.format = format;
        imageCreateInfo.extent = extent;
        imageCreateInfo.usage = imageUsageFlags | vk::ImageUsageFlagBits::eTransferDst;
        imageCreateInfo.arrayLayers = layerCount;
        imageCreateInfo.mipLevels = mipLevels;
        static_cast<vks::Image&>(*this) = context.createImage(imageCreateInfo);
        vk::ImageSubresourceRange subresourceRange;
        subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
        subresourceRange.levelCount = mipLevels;
        subresourceRange.layerCount = layerCount;
        // Use a separate command buffer for texture loading
        context.withPrimaryCommandBuffer([&](const vk::CommandBuffer& copyCmd) {
            // Image barrier for optimal image (target)
            // Set initial layout for all array layers (faces) of the optimal (target) tiled texture
            context.setImageLayout(copyCmd, image, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, subresourceRange);
            // Copy the layers and mip levels from the staging buffer to the optimal tiled image
            copyCmd.copyBufferToImage(stagingBuffer.buffer, image, vk::ImageLayout::eTransferDstOptimal, bufferCopyRegions);
            // Change texture image layout to shader read after all faces have been copied
            context.setImageLayout(copyCmd, image, vk::ImageLayout::eTransferDstOptimal, imageLayout, subresourceRange);
        });
        // Clean up staging resources
        stagingBuffer.destroy();
        // Create sampler
        vk::SamplerCreateInfo samplerCreateInfo;
        samplerCreateInfo.magFilter = vk::Filter::eLinear;
        samplerCreateInfo.minFilter = vk::Filter::eLinear;
        samplerCreateInfo.mipmapMode = vk::SamplerMipmapMode::eLinear;
        samplerCreateInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge;
        samplerCreateInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge;
        samplerCreateInfo.addressModeW = vk::SamplerAddressMode::eClampToEdge;
        samplerCreateInfo.maxAnisotropy = context.deviceFeatures.samplerAnisotropy ? context.deviceProperties.limits.maxSamplerAnisotropy : 1.0f;
        // Consistency fix: Texture2D enables anisotropy when the feature is
        // available; without anisotropyEnable the maxAnisotropy value above
        // was ignored.
        samplerCreateInfo.anisotropyEnable = context.deviceFeatures.samplerAnisotropy;
        samplerCreateInfo.maxLod = (float)mipLevels;
        samplerCreateInfo.borderColor = vk::BorderColor::eFloatOpaqueWhite;
        sampler = context.device.createSampler(samplerCreateInfo);
        // Create image view covering every mip level and layer as a 2D array.
        vk::ImageViewCreateInfo viewCreateInfo;
        viewCreateInfo.viewType = vk::ImageViewType::e2DArray;
        viewCreateInfo.image = image;
        viewCreateInfo.format = format;
        viewCreateInfo.subresourceRange = vk::ImageSubresourceRange{ vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, layerCount };
        view = context.device.createImageView(viewCreateInfo);
        // Update descriptor image info member that can be used for setting up descriptor sets
        updateDescriptor();
    }
};
/** @brief Cube map texture (six array layers with a full mip chain per face) */
class TextureCubeMap : public Texture {
public:
    /**
     * Load a cubemap texture including all mip levels from a single file.
     *
     * On return the object owns the device-local image, its sampler and image
     * view, and has transitioned the image to `imageLayout`.
     *
     * @param context Vulkan context providing the device, staging-buffer helpers and layout transitions
     * @param filename File to load (supports .ktx and .dds, parsed via gli)
     * @param format Vulkan format of the image data stored in the file
     * @param imageUsageFlags (Optional) Usage flags for the texture's image (defaults to eSampled;
     *        eTransferDst is OR'd in automatically for the staging copy)
     * @param imageLayout (Optional) Layout the image is transitioned to after the upload
     *        (defaults to eShaderReadOnlyOptimal)
     */
    void loadFromFile(
        const vks::Context& context,
        const std::string& filename,
        vk::Format format,
        vk::ImageUsageFlags imageUsageFlags = vk::ImageUsageFlagBits::eSampled,
        vk::ImageLayout imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal) {
        device = context.device;
        // Parse the file contents into a gli cube texture. The shared_ptr only
        // serves to move the decoded texture out of the read callback.
        std::shared_ptr<const gli::texture_cube> texPtr;
        vks::file::withBinaryFileContents(filename, [&](size_t size, const void* data) {
            texPtr = std::make_shared<const gli::texture_cube>(gli::load((const char*)data, size));
        });
        const auto& texCube = *texPtr;
        assert(!texCube.empty());
        extent.width = static_cast<uint32_t>(texCube.extent().x);
        extent.height = static_cast<uint32_t>(texCube.extent().y);
        extent.depth = 1;
        mipLevels = static_cast<uint32_t>(texCube.levels());
        // Copy the raw texel data into a host-visible staging buffer.
        auto stagingBuffer = context.createStagingBuffer(texCube);
        // Setup buffer copy regions for each face including all of its miplevels.
        // gli stores faces/levels contiguously, so a running byte offset walks
        // the staging buffer in the same order as the nested loops below.
        std::vector<vk::BufferImageCopy> bufferCopyRegions;
        size_t offset = 0;
        vk::BufferImageCopy bufferImageCopy;
        bufferImageCopy.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
        bufferImageCopy.imageSubresource.layerCount = 1;
        bufferImageCopy.imageExtent.depth = 1;
        for (uint32_t face = 0; face < 6; face++) {
            for (uint32_t level = 0; level < mipLevels; level++) {
                auto image = (texCube)[face][level];
                auto imageExtent = image.extent();
                bufferImageCopy.bufferOffset = offset;
                bufferImageCopy.imageSubresource.mipLevel = level;
                // Cube faces are addressed as array layers in Vulkan.
                bufferImageCopy.imageSubresource.baseArrayLayer = face;
                bufferImageCopy.imageExtent.width = (uint32_t)imageExtent.x;
                bufferImageCopy.imageExtent.height = (uint32_t)imageExtent.y;
                bufferCopyRegions.push_back(bufferImageCopy);
                // Increase offset into staging buffer for next level / face
                offset += image.size();
            }
        }
        // Create optimal tiled target image
        vk::ImageCreateInfo imageCreateInfo;
        imageCreateInfo.imageType = vk::ImageType::e2D;
        imageCreateInfo.format = format;
        imageCreateInfo.mipLevels = mipLevels;
        imageCreateInfo.extent = extent;
        // Cube faces count as array layers in Vulkan
        imageCreateInfo.arrayLayers = 6;
        // Ensure that the TRANSFER_DST bit is set for staging
        imageCreateInfo.usage = imageUsageFlags | vk::ImageUsageFlagBits::eTransferDst;
        // This flag is required for cube map images
        imageCreateInfo.flags = vk::ImageCreateFlagBits::eCubeCompatible;
        ((vks::Image&)*this) = context.createImage(imageCreateInfo);
        // Record the upload on a primary command buffer that the context
        // submits and waits on before returning.
        context.withPrimaryCommandBuffer([&](const vk::CommandBuffer& copyCmd) {
            // Image barrier for optimal image (target)
            // Set initial layout for all array layers (faces) of the optimal (target) tiled texture
            vk::ImageSubresourceRange subresourceRange { vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, 6 };
            context.setImageLayout(copyCmd, image, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, subresourceRange);
            // Copy the cube map faces from the staging buffer to the optimal tiled image
            copyCmd.copyBufferToImage(stagingBuffer.buffer, image, vk::ImageLayout::eTransferDstOptimal, bufferCopyRegions);
            // Change texture image layout to shader read after all faces have been copied;
            // remember the final layout on the member for later descriptor setup.
            this->imageLayout = imageLayout;
            context.setImageLayout(copyCmd, image, vk::ImageLayout::eTransferDstOptimal, imageLayout, subresourceRange);
        });
        // Create a default trilinear clamp-to-edge sampler covering the full mip chain.
        vk::SamplerCreateInfo samplerCreateInfo;
        samplerCreateInfo.magFilter = vk::Filter::eLinear;
        samplerCreateInfo.minFilter = vk::Filter::eLinear;
        samplerCreateInfo.mipmapMode = vk::SamplerMipmapMode::eLinear;
        samplerCreateInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge;
        samplerCreateInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge;
        samplerCreateInfo.addressModeW = vk::SamplerAddressMode::eClampToEdge;
        // Max level-of-detail should match mip level count
        samplerCreateInfo.maxLod = (float)mipLevels;
        // Only enable anisotropic filtering if supported by the device
        samplerCreateInfo.maxAnisotropy = context.deviceFeatures.samplerAnisotropy ? context.deviceProperties.limits.maxSamplerAnisotropy : 1.0f;
        samplerCreateInfo.anisotropyEnable = context.deviceFeatures.samplerAnisotropy;
        samplerCreateInfo.borderColor = vk::BorderColor::eFloatOpaqueWhite;
        sampler = device.createSampler(samplerCreateInfo);
        // Create image view
        // Textures are not directly accessed by the shaders and
        // are abstracted by image views containing additional
        // information and sub resource ranges
        view = device.createImageView(vk::ImageViewCreateInfo{
            {}, image, vk::ImageViewType::eCube, format,{},
            vk::ImageSubresourceRange{ vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, 6 }
        });
        // Staging buffer is no longer needed once the blocking upload returned.
        stagingBuffer.destroy();
        // Update descriptor image info member that can be used for setting up descriptor sets
        updateDescriptor();
    }
};
} }

View file

@ -0,0 +1,197 @@
//
// Created by Bradley Austin Davis on 2016/05/26
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "VKWindow.h"
#include <QtGui/qevent.h>
#include <QtCore/QTimer>
#include "Config.h"
#include "Swapchain.h"
#include "Context.h"
// Debounce window resizes: size-dependent Vulkan resources are rebuilt only
// once the window size has been stable for 50 ms, via resizeFramebuffer(),
// instead of on every intermediate resize event.
VKWindow::VKWindow(QScreen* screen) : QWindow(screen) {
    auto* debounceTimer = new QTimer(this);
    debounceTimer->setSingleShot(true);
    debounceTimer->setInterval(50);
    debounceTimer->setTimerType(Qt::TimerType::PreciseTimer);
    connect(debounceTimer, &QTimer::timeout, this, &VKWindow::resizeFramebuffer);
    _resizeTimer = debounceTimer;
}
// Creates the native presentation surface for this window and hands it to the
// swapchain. Returns a reference to the stored surface handle.
// NOTE(review): this path is Win32-only (GetModuleHandle / HWND); other
// platforms would need their own vkCreate*SurfaceKHR variant — confirm the
// intended platform support for this library.
const vk::SurfaceKHR& VKWindow::createSurface() {
    _surface = _context.instance.createWin32SurfaceKHR({ {}, GetModuleHandle(NULL), (HWND)winId() });
    _swapchain.setSurface(_surface);
    return _surface;
}
void VKWindow::createSwapchain() {
if (!_surface) {
throw std::runtime_error("No surface");
}
{
auto qsize = size();
_extent = { (uint32_t)qsize.width(), (uint32_t)qsize.height() };
}
_swapchain.create(_extent, true);
setupRenderPass();
setupDepthStencil();
setupFramebuffers();
}
void VKWindow::setupDepthStencil() {
if (_depthStencil) {
_depthStencil.destroy();
_depthStencil = {};
}
vk::ImageAspectFlags aspect = vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil;
vk::ImageCreateInfo depthStencilCreateInfo;
depthStencilCreateInfo.imageType = vk::ImageType::e2D;
depthStencilCreateInfo.extent = vk::Extent3D{ _extent.width, _extent.height, 1 };
depthStencilCreateInfo.format = vk::Format::eD24UnormS8Uint;
depthStencilCreateInfo.mipLevels = 1;
depthStencilCreateInfo.arrayLayers = 1;
depthStencilCreateInfo.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment | vk::ImageUsageFlagBits::eTransferSrc;
_depthStencil = _context.createImage(depthStencilCreateInfo, vk::MemoryPropertyFlagBits::eDeviceLocal);
vk::ImageViewCreateInfo depthStencilView;
depthStencilView.viewType = vk::ImageViewType::e2D;
depthStencilView.format = vk::Format::eD24UnormS8Uint;
depthStencilView.subresourceRange.aspectMask = aspect;
depthStencilView.subresourceRange.levelCount = 1;
depthStencilView.subresourceRange.layerCount = 1;
depthStencilView.image = _depthStencil.image;
_depthStencil.view = _device.createImageView(depthStencilView);
}
void VKWindow::setupFramebuffers() {
// Recreate the frame buffers
if (!_framebuffers.empty()) {
for (auto& framebuffer : _framebuffers) {
_device.destroy(framebuffer);
}
_framebuffers.clear();
}
vk::ImageView attachments[2];
// Depth/Stencil attachment is the same for all frame buffers
attachments[1] = _depthStencil.view;
vk::FramebufferCreateInfo framebufferCreateInfo;
framebufferCreateInfo.renderPass = _renderPass;
framebufferCreateInfo.attachmentCount = 2;
framebufferCreateInfo.pAttachments = attachments;
framebufferCreateInfo.width = _extent.width;
framebufferCreateInfo.height = _extent.height;
framebufferCreateInfo.layers = 1;
// Create frame buffers for every swap chain image
_framebuffers = _swapchain.createFramebuffers(framebufferCreateInfo);
}
// (Re)creates the render pass: one color attachment (the swapchain image,
// left in PRESENT_SRC for presentation) plus one depth/stencil attachment,
// consumed by a single graphics subpass.
void VKWindow::setupRenderPass() {
    if (_renderPass) {
        _device.destroy(_renderPass);
    }
    std::array<vk::AttachmentDescription, 2> attachments;
    // Color attachment
    attachments[0].format = _swapchain.colorFormat;
    attachments[0].loadOp = vk::AttachmentLoadOp::eClear;
    attachments[0].storeOp = vk::AttachmentStoreOp::eStore;
    attachments[0].initialLayout = vk::ImageLayout::eUndefined;
    attachments[0].finalLayout = vk::ImageLayout::ePresentSrcKHR;
    // Depth attachment
    // NOTE(review): this format must stay in sync with the image created in
    // setupDepthStencil().
    attachments[1].format = vk::Format::eD24UnormS8Uint;
    attachments[1].loadOp = vk::AttachmentLoadOp::eClear;
    // Depth/stencil contents are not needed after the pass, so don't store.
    attachments[1].storeOp = vk::AttachmentStoreOp::eDontCare;
    attachments[1].stencilLoadOp = vk::AttachmentLoadOp::eClear;
    attachments[1].stencilStoreOp = vk::AttachmentStoreOp::eDontCare;
    attachments[1].initialLayout = vk::ImageLayout::eUndefined;
    attachments[1].finalLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal;
    // Only one depth attachment, so put it first in the references
    vk::AttachmentReference depthReference;
    depthReference.attachment = 1;
    depthReference.layout = vk::ImageLayout::eDepthStencilAttachmentOptimal;
    std::vector<vk::AttachmentReference> colorAttachmentReferences;
    {
        vk::AttachmentReference colorReference;
        colorReference.attachment = 0;
        colorReference.layout = vk::ImageLayout::eColorAttachmentOptimal;
        colorAttachmentReferences.push_back(colorReference);
    }
    // These vectors must stay alive until createRenderPass() below, since the
    // create-info structs hold raw pointers into them.
    std::vector<vk::SubpassDescription> subpasses;
    std::vector<vk::SubpassDependency> subpassDependencies;
    {
        {
            // NOTE(review): only a subpass-0 -> EXTERNAL dependency is declared
            // here (color write before external color read). Render-pass setups
            // commonly also declare an EXTERNAL -> 0 acquire dependency; confirm
            // the implicit external dependency is sufficient for this use.
            vk::SubpassDependency dependency;
            dependency.srcSubpass = 0;
            dependency.srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite;
            dependency.srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput;
            dependency.dstSubpass = VK_SUBPASS_EXTERNAL;
            dependency.dstAccessMask = vk::AccessFlagBits::eColorAttachmentRead;
            dependency.dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput;
            subpassDependencies.push_back(dependency);
        }
        vk::SubpassDescription subpass;
        subpass.pipelineBindPoint = vk::PipelineBindPoint::eGraphics;
        subpass.pDepthStencilAttachment = &depthReference;
        subpass.colorAttachmentCount = (uint32_t)colorAttachmentReferences.size();
        subpass.pColorAttachments = colorAttachmentReferences.data();
        subpasses.push_back(subpass);
    }
    vk::RenderPassCreateInfo renderPassInfo;
    renderPassInfo.attachmentCount = (uint32_t)attachments.size();
    renderPassInfo.pAttachments = attachments.data();
    renderPassInfo.subpassCount = (uint32_t)subpasses.size();
    renderPassInfo.pSubpasses = subpasses.data();
    renderPassInfo.dependencyCount = (uint32_t)subpassDependencies.size();
    renderPassInfo.pDependencies = subpassDependencies.data();
    _renderPass = _device.createRenderPass(renderPassInfo);
}
// Qt resize hook. Rather than rebuilding Vulkan resources for every
// intermediate size during a drag, (re)start the debounce timer so that
// resizeFramebuffer() runs once the size settles.
void VKWindow::resizeEvent(QResizeEvent* event) {
    QWindow::resizeEvent(event);
    const auto newSize = event->size();
    const bool sizeChanged =
        (newSize.width() != _extent.width) || (newSize.height() != _extent.height);
    if (sizeChanged) {
        _resizeTimer->start();
    }
}
void VKWindow::resizeFramebuffer() {
auto qsize = size();
_extent = { (uint32_t)qsize.width(), (uint32_t)qsize.height() };
_swapchain.waitIdle();
_swapchain.create(_extent, true);
setupDepthStencil();
setupFramebuffers();
}
// Tears down all window-owned Vulkan resources. Previously only the swapchain
// was destroyed, leaking the framebuffers, depth/stencil image, render pass
// and surface. Destruction order: dependent objects (framebuffers) first,
// then attachments and render pass, swapchain, and finally the surface.
// NOTE(review): assumes the shared context/device outlives this window —
// confirm shutdown ordering at the call site.
VKWindow::~VKWindow() {
    for (auto& framebuffer : _framebuffers) {
        _device.destroy(framebuffer);
    }
    _framebuffers.clear();
    if (_depthStencil) {
        _depthStencil.destroy();
        _depthStencil = {};
    }
    if (_renderPass) {
        _device.destroy(_renderPass);
        _renderPass = nullptr;
    }
    _swapchain.destroy();
    if (_surface) {
        _context.instance.destroySurfaceKHR(_surface);
        _surface = nullptr;
    }
}
// Protected-access shim: allows the friended VkCloseEventFilter to fire the
// aboutToClose() signal on this window's behalf.
void VKWindow::emitClosing() {
    emit aboutToClose();
}
// Acquires the next swapchain image (signaling `semaphore` when the image is
// ready for rendering) and returns the framebuffer associated with it.
// NOTE(review): the acquire's success code (e.g. eSuboptimalKHR) is discarded
// — confirm the swapchain handles suboptimal/out-of-date internally.
vk::Framebuffer VKWindow::acquireFramebuffer(const vk::Semaphore& semaphore) {
    const auto acquired = _swapchain.acquireNextImage(semaphore);
    return _framebuffers[acquired.value];
}

View file

@ -0,0 +1,57 @@
//
// Created by Bradley Austin Davis on 2016/03/19
// Copyright 2013 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <QtGui/QWindow>
#include "Config.h"
#include "Context.h"
#include "Swapchain.h"
// A QWindow backed by a Vulkan swapchain. Owns the presentation surface,
// swapchain, render pass, shared depth/stencil attachment and one framebuffer
// per swapchain image; window resizes are debounced via _resizeTimer.
class VKWindow : public QWindow {
    Q_OBJECT
public:
    VKWindow(QScreen* screen = nullptr);
    virtual ~VKWindow();
    // Creates the swapchain plus the size-dependent render pass, depth/stencil
    // image and framebuffers. Requires createSurface() to have been called.
    void createSwapchain();
    // NOTE(review): no definition appears in the accompanying .cpp — confirm
    // this is implemented elsewhere or still pending.
    void queuePresent(const vk::ArrayProxy<const vk::Semaphore>& waitSemaphores);
    // Creates the native window surface and attaches it to the swapchain.
    const vk::SurfaceKHR& createSurface();
    const vk::SurfaceKHR& getSurface() { return _surface; }
    vks::Swapchain& getSwapchain() { return _swapchain; }
    // Acquires the next swapchain image and returns its framebuffer;
    // `semaphore` is signaled when the image is ready for rendering.
    vk::Framebuffer acquireFramebuffer(const vk::Semaphore& semaphore);
signals:
    // Emitted (via emitClosing) just before the window closes.
    void aboutToClose();
protected:
    // The event filter is friended so it can call the protected emitClosing().
    friend class VkCloseEventFilter;
    void emitClosing();
protected slots:
    // Debounced handler that rebuilds size-dependent resources after a resize.
    virtual void resizeFramebuffer();
protected:
    void resizeEvent(QResizeEvent* event) override;
    void setupRenderPass();
    void setupDepthStencil();
    void setupFramebuffers();
public:
    // NOTE(review): these members are public; consider restricting access.
    vks::Context& _context{ vks::Context::get() };
    const vk::Device& _device{ _context.device };
    vk::SurfaceKHR _surface;
    vk::RenderPass _renderPass;
    vk::Extent2D _extent;
    vks::Swapchain _swapchain;
    vks::Image _depthStencil;
    std::vector<vk::Framebuffer> _framebuffers;
    // Single-shot 50 ms debounce timer driving resizeFramebuffer().
    QTimer* _resizeTimer{ nullptr };
};

View file

@ -0,0 +1,49 @@
//
// Created by Bradley Austin Davis on 2016/03/19
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#pragma once
#include <cstdint>
#include <string>
#include <sstream>
namespace vks {
    // Version information for Vulkan is stored in a single 32 bit integer
    // using the VK_MAKE_VERSION packing: 12 bits of patch (max 4095), then
    // 10 bits of minor and 10 bits of major (max 1023 each).
    struct Version {
        Version() : vulkan_patch(0), vulkan_minor(0), vulkan_major(0) {
        }
        Version(uint32_t version) : Version() {
            *this = version;
        }
        // Unpack a VK_MAKE_VERSION-style integer with explicit shifts and
        // masks. (The previous memcpy-based implementation relied on
        // implementation-defined bitfield layout and wrote through const
        // members, which is undefined behavior.)
        Version& operator =(uint32_t version) {
            vulkan_patch = version & 0xFFFu;
            vulkan_minor = (version >> 12) & 0x3FFu;
            vulkan_major = (version >> 22) & 0x3FFu;
            return *this;
        }
        // Repack into the canonical Vulkan encoding.
        operator uint32_t() const {
            return (uint32_t(vulkan_major) << 22) | (uint32_t(vulkan_minor) << 12) | uint32_t(vulkan_patch);
        }
        // Human-readable "major.minor.patch".
        std::string toString() const {
            std::stringstream buffer;
            buffer << vulkan_major << "." << vulkan_minor << "." << vulkan_patch;
            return buffer.str();
        }
        // Bitfields are kept for layout compactness and source compatibility
        // with code reading the components; packing no longer depends on their
        // in-memory order. `const` was dropped so operator= is well-defined.
        uint32_t vulkan_patch : 12;
        uint32_t vulkan_minor : 10;
        uint32_t vulkan_major : 10;
    };
}

View file

@ -0,0 +1,2 @@
#define VMA_IMPLEMENTATION
#include <vma/vk_mem_alloc.h>