Initial texture transfer functionality

This commit is contained in:
Karol Suprynowicz 2024-12-02 23:11:44 +01:00
parent f026e40e9f
commit db9617cc1d
9 changed files with 442 additions and 81 deletions

View file

@ -31,6 +31,8 @@
#include "VKShared.h"
#include "VKTexture.h"
#define FORCE_STRICT_TEXTURE 1
using namespace gpu;
using namespace gpu::vk;
@ -411,6 +413,8 @@ struct Cache {
VK_CHECK_RESULT(vkCreateRenderPass(context.device->logicalDevice, &renderPassInfo, nullptr, &renderPass));
_renderPassMap[key] = renderPass;
return renderPass;
} else {
printf("found");
}
return itr->second;
}
@ -519,7 +523,8 @@ struct Cache {
//float ra.depthBiasConstantFactor;
//float ra.depthBiasClamp;
//float ra.depthBiasSlopeFactor;
ra.depthClampEnable = stateData.flags.depthClampEnable ? VK_TRUE : VK_FALSE;
ra.depthClampEnable = VK_TRUE;
//ra.depthClampEnable = stateData.flags.depthClampEnable ? VK_TRUE : VK_FALSE; // VKTODO
ra.frontFace = stateData.flags.frontFaceClockwise ? VK_FRONT_FACE_CLOCKWISE : VK_FRONT_FACE_COUNTER_CLOCKWISE;
// ra.lineWidth
ra.polygonMode = (VkPolygonMode)(2 - stateData.fillMode);
@ -546,7 +551,8 @@ struct Cache {
// Depth/Stencil
{
auto& ds = builder.depthStencilState;
ds.depthTestEnable = stateData.depthTest.isEnabled() ? VK_TRUE : VK_FALSE;
ds.depthTestEnable = VK_FALSE;
//ds.depthTestEnable = stateData.depthTest.isEnabled() ? VK_TRUE : VK_FALSE; //VKTODO
ds.depthWriteEnable = stateData.depthTest.getWriteMask() != 0 ? VK_TRUE : VK_FALSE;
ds.depthCompareOp = (VkCompareOp)stateData.depthTest.getFunction();
ds.front = getStencilOp(stateData.stencilTestFront);
@ -564,7 +570,12 @@ struct Cache {
for (const auto& entry : format.getChannels()) {
const auto& slot = entry.first;
const auto& channel = entry.second;
bd.push_back({ slot, (uint32_t)channel._stride, (VkVertexInputRate)(channel._frequency) });
VkVertexInputBindingDescription bindingDescription {};
bindingDescription.binding = slot;
bindingDescription.stride = (uint32_t)channel._stride;
qDebug() << "binding " << bindingDescription.binding << "stride" << bindingDescription.stride;
bindingDescription.inputRate = (VkVertexInputRate)(channel._frequency);
bd.push_back(bindingDescription);
}
bool colorFound = false;
@ -730,14 +741,16 @@ void VKBackend::executeFrame(const FramePointer& frame) {
// generate pipeline
// find unique descriptor targets
// do we need to transfer data to the GPU?
{
PROFILE_RANGE(gpu_vk_detail, "Transfer");
renderPassTransfer(batch);
}
{
PROFILE_RANGE(gpu_vk_detail, _stereo._enable ? "Render Stereo" : "Render");
renderPassDraw(batch);
}
}
{
PROFILE_RANGE(gpu_vk_detail, "Transfer");
renderPassTransfer(batch);
}
{
PROFILE_RANGE(gpu_vk_detail, _stereo._enable ? "Render Stereo" : "Render");
renderPassDraw(batch);
}
if (renderpassActive) {
@ -1014,11 +1027,16 @@ void VKBackend::updateVkDescriptorWriteSetsTexture(VkDescriptorSet target) {
if (_resource._textures[i].texture) {
// VKTODO: move vulkan texture creation to the transfer parts
// VKTODO: this doesn't work yet
//VKTexture * texture = syncGPUObject(*_resource._textures[i]._texture);
VKTexture *texture = syncGPUObject(*_resource._textures[i].texture);
VkDescriptorImageInfo imageInfo{};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = _defaultTexture.view;
imageInfo.sampler = _defaultTexture.sampler;
if (texture) {
imageInfo = texture->getDescriptorImageInfo();
} else {
imageInfo = _defaultTexture.descriptor;
}
//imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//imageInfo.imageView = texture->;
//imageInfo.sampler = _defaultTexture.sampler;
VkWriteDescriptorSet descriptorWriteSet{};
descriptorWriteSet.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
@ -1157,6 +1175,50 @@ void VKBackend::resetQueryStage() {
_queryStage._rangeQueryDepth = 0;
}
// Ensures the command buffer is recording inside the render pass required by
// the current pipeline state, ending the previously active pass if it differs.
// No-op when the required pass is already active.
// VKTODO: check if framebuffer has changed and if so update render pass too
void VKBackend::updateRenderPass() {
    auto requiredPass = _cache.pipelineState.getRenderPass(_context);

    // Already recording into the right pass — nothing to do.
    if (_currentRenderPass == requiredPass) {
        return;
    }

    // A different pass is active; it must be ended before a new one begins.
    if (_currentRenderPass) {
        vkCmdEndRenderPass(_currentCommandBuffer);
    }
    _currentRenderPass = requiredPass;

    auto beginInfo = vks::initializers::renderPassBeginInfo();
    beginInfo.renderPass = requiredPass;

    Q_ASSERT(_cache.pipelineState.framebuffer);
    auto framebuffer = VKFramebuffer::sync(*this, *_cache.pipelineState.framebuffer);
    Q_ASSERT(framebuffer);
    beginInfo.framebuffer = framebuffer->vkFramebuffer;
    beginInfo.clearValueCount = framebuffer->attachments.size();

    // One clear value per attachment: depth/stencil clears to the far plane,
    // color attachments clear to a fixed (greenish) color.
    std::vector<VkClearValue> clearValues(framebuffer->attachments.size());
    for (size_t i = 0; i < framebuffer->attachments.size(); i++) {
        if (framebuffer->attachments[i].isDepthStencil()) {
            clearValues[i].depthStencil = { 1.0f, 0 };
        } else {
            clearValues[i].color = { { 0.2f, 0.5f, 0.1f, 1.0f } };
        }
    }
    beginInfo.pClearValues = clearValues.data();

    // Render area follows the current viewport (x, y, width, height).
    beginInfo.renderArea = VkRect2D{ VkOffset2D{ _transform._viewport.x, _transform._viewport.y },
                                     VkExtent2D{ (uint32_t)_transform._viewport.z, (uint32_t)_transform._viewport.w } };

    vkCmdBeginRenderPass(_currentCommandBuffer, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
}
// Ends the currently active render pass, if any, and clears the tracking
// handle so updateRenderPass() will begin a fresh pass next time.
void VKBackend::resetRenderPass() {
    if (!_currentRenderPass) {
        return;
    }
    vkCmdEndRenderPass(_currentCommandBuffer);
    _currentRenderPass = VK_NULL_HANDLE;
}
void VKBackend::renderPassTransfer(const Batch& batch) {
const size_t numCommands = batch.getCommands().size();
const Batch::Commands::value_type* command = batch.getCommands().data();
@ -1245,27 +1307,7 @@ void VKBackend::renderPassDraw(const Batch& batch) {
if (_cache.pipelineState.framebuffer->getRenderBuffers()[0]._texture->getTexelFormat().getSemantic() == gpu::R11G11B10) {
printf("Test");
}
auto renderPassBeginInfo = vks::initializers::renderPassBeginInfo();
renderPassBeginInfo.renderPass = _cache.pipelineState.getRenderPass(_context);
Q_ASSERT(_cache.pipelineState.framebuffer);
//auto framebuffer = getGPUObject<VKFramebuffer>(*_cache.pipelineState.framebuffer);
auto framebuffer = VKFramebuffer::sync(*this, *_cache.pipelineState.framebuffer);
Q_ASSERT(framebuffer);
renderPassBeginInfo.framebuffer = framebuffer->vkFramebuffer;
renderPassBeginInfo.clearValueCount = framebuffer->attachments.size();
std::vector<VkClearValue> clearValues;
clearValues.resize(framebuffer->attachments.size());
for (size_t i = 0; i < framebuffer->attachments.size(); i++) {
if (framebuffer->attachments[i].isDepthStencil()) {
clearValues[i].depthStencil = { 1.0f, 0 };
} else {
clearValues[i].color = { { 0.2f, 0.5f, 0.1f, 1.0f } };
}
}
renderPassBeginInfo.pClearValues = clearValues.data();
renderPassBeginInfo.renderArea = VkRect2D{VkOffset2D {_transform._viewport.x, _transform._viewport.y}, VkExtent2D {(uint32_t)_transform._viewport.z, (uint32_t)_transform._viewport.w}};
// VKTODO: this is inefficient
vkCmdBeginRenderPass(_currentCommandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
updateRenderPass();
// VKTODO: this is inefficient
vkCmdBindPipeline(_currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, _cache.getPipeline(_context));
// VKTODO: this will create too many set viewport commands, but should work
@ -1331,7 +1373,6 @@ void VKBackend::renderPassDraw(const Batch& batch) {
}
CommandCall call = _commandCalls[(*command)];
(this->*(call))(batch, *offset);
vkCmdEndRenderPass(_currentCommandBuffer);
break;
}
default: {
@ -1344,6 +1385,7 @@ void VKBackend::renderPassDraw(const Batch& batch) {
command++;
offset++;
}
resetRenderPass();
}
void VKBackend::draw(VkPrimitiveTopology mode, uint32 numVertices, uint32 startVertex) {
@ -1429,9 +1471,21 @@ VKTexture* VKBackend::syncGPUObject(const Texture& texture) {
case TextureUsageType::RESOURCE:
#endif
case TextureUsageType::STRICT_RESOURCE:
// VKTODO
//qCDebug(gpu_vk_logging) << "Strict texture " << texture.source().c_str();
//object = new GL45StrictResourceTexture(shared_from_this(), texture);
if (texture.getStoredSize() == 0){
qDebug(gpu_vk_logging) << "No data on texture";
return nullptr;
}
if (evalTexelFormatInternal(texture.getStoredMipFormat()) != evalTexelFormatInternal(texture.getTexelFormat())) {
qDebug() << "Format mismatch, stored: " << evalTexelFormatInternal(texture.getStoredMipFormat()) << " texel: " << evalTexelFormatInternal(texture.getTexelFormat());
return nullptr;
}
// VKTODO: What is strict resource?
qWarning() << "TextureUsageType::STRICT_RESOURCE";
qCDebug(gpu_vk_logging) << "Strict texture " << texture.source().c_str();
object = new VKStrictResourceTexture(shared_from_this(), texture);
break;
#if !FORCE_STRICT_TEXTURE
@ -1583,6 +1637,8 @@ void VKBackend::updateInput() {
for (size_t buffer = 0; buffer < _input._buffers.size(); buffer++, vbo++, offset++, stride++) {
if (_input._invalidBuffers.test(buffer)) {
auto vkBuffer = VKBuffer::getBuffer(*this, *_input._buffers[buffer]);
qDebug() << "Vertex buffer size: " << _input._buffers[buffer]->getSize();
qDebug() << "Vertex buffer usage: " << _input._buffers[buffer]->getUsage();
VkDeviceSize vkOffset = _input._bufferOffsets[buffer];
vkCmdBindVertexBuffers(_currentCommandBuffer, buffer, 1, &vkBuffer, &vkOffset);
//glBindVertexBuffer(buffer, (*vbo), (*offset), (GLsizei)(*stride));

View file

@ -245,10 +245,10 @@ protected:
void resetResourceStage();
// VKTODO
struct OutputStageState {
/*struct OutputStageState {
FramebufferReference _framebuffer{};
int _drawFBO{ 0 };
} _output;
} _output;*/
// VKTODO
struct QueryStageState {
@ -257,6 +257,11 @@ protected:
void resetQueryStage();
VkRenderPass _currentRenderPass{ VK_NULL_HANDLE };
// Checks if renderpass change is needed and changes it if required
void updateRenderPass();
void resetRenderPass();
// VKTODO: one instance per each frame
// Contains objects that are created per frame and need to be deleted after the frame is rendered
struct FrameData {
@ -298,6 +303,7 @@ public:
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final;
void setDrawCommandBuffer(VkCommandBuffer commandBuffer);
size_t getNumInputBuffers() const { return _input._invalidBuffers.size(); }
VkDescriptorImageInfo getDefaultTextureDescriptorInfo() { return _defaultTexture.descriptor; };
void trash(const VKBuffer& buffer);

View file

@ -65,7 +65,7 @@ void gpu::vk::VKFramebuffer::update() {
attachmentCI.format = gpu::vk::evalTexelFormatInternal(vkTexture->_gpuObject.getTexelFormat());
attachmentCI.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
attachmentCI.imageSampleCount = VK_SAMPLE_COUNT_1_BIT;
addAttachment(attachmentCI, vkTexture->_texture);
addAttachment(attachmentCI, vkTexture->_vkImage);
//glNamedFramebufferTexture(_id, colorAttachments[unit], gltexture->_texture, 0);
// VKTODO: how to do this?
/*} else if (vkTexture->_target == GL_TEXTURE_2D_MULTISAMPLE) {
@ -111,7 +111,7 @@ void gpu::vk::VKFramebuffer::update() {
attachmentCI.format = gpu::vk::evalTexelFormatInternal(vkTexture->_gpuObject.getTexelFormat());
attachmentCI.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
attachmentCI.imageSampleCount = VK_SAMPLE_COUNT_1_BIT;
addAttachment(attachmentCI, vkTexture->_texture);
addAttachment(attachmentCI, vkTexture->_vkImage);
//glNamedFramebufferTexture(_id, attachement, gltexture->_texture, 0);
// VKTODO
/*}

View file

@ -12,6 +12,7 @@
#include <NumericalConstants.h>
#include "VKBackend.h"
#include "vk/Allocation.h"
using namespace gpu;
using namespace gpu::vk;
@ -146,7 +147,7 @@ VKTexture::VKTexture(const std::weak_ptr<VKBackend>& backend, const Texture& tex
VKTexture::~VKTexture() {
auto backend = _backend.lock();
if (backend && _texture == VK_NULL_HANDLE) {
if (backend && _vkImage == VK_NULL_HANDLE) {
// VKTODO
// backend->releaseTexture(_id, 0);
}
@ -184,7 +185,7 @@ VkImageViewType VKTexture::getVKTextureType(const Texture& texture) {
}
// From VKS
void VKAttachmentTexture::createTexture() {
void VKAttachmentTexture::createTexture(VKBackend &backend) {
VkImageCreateInfo imageCI = vks::initializers::imageCreateInfo();
imageCI.imageType = VK_IMAGE_TYPE_2D;
imageCI.format = evalTexelFormatInternal(_gpuObject.getTexelFormat());
@ -209,15 +210,18 @@ void VKAttachmentTexture::createTexture() {
auto device = _backend.lock()->getContext().device->logicalDevice;
// Create image for this attachment
VK_CHECK_RESULT(vkCreateImage(device, &imageCI, nullptr, &_texture));
/*VK_CHECK_RESULT(vkCreateImage(device, &imageCI, nullptr, &_texture));
VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo();
VkMemoryRequirements memReqs;
vkGetImageMemoryRequirements(device, _texture, &memReqs);
memAlloc.allocationSize = memReqs.size;
memAlloc.memoryTypeIndex = _backend.lock()->getContext().device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
// VKTODO: this may need to be changed to VMA
VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &_vkDeviceMemory));
VK_CHECK_RESULT(vkBindImageMemory(device, _texture, _vkDeviceMemory, 0));
VK_CHECK_RESULT(vkBindImageMemory(device, _texture, _vkDeviceMemory, 0));*/
VmaAllocationCreateInfo allocationCI = {};
allocationCI.usage = VMA_MEMORY_USAGE_GPU_ONLY;
vmaCreateImage(vks::Allocation::getAllocator(), &imageCI, &allocationCI, &_vkImage, &_vmaAllocation, nullptr);
/*attachment.subresourceRange = {};
attachment.subresourceRange.aspectMask = aspectMask;
@ -235,6 +239,223 @@ void VKAttachmentTexture::createTexture() {
}
// Creates the GPU-only VkImage for a strict (immutable) resource texture via
// VMA and snapshots the CPU-side mip data into _transferData so it cannot
// change or be deleted before transfer() uploads it.
// NOTE(review): only face 0 of each mip is captured — cube maps and texture
// arrays are not handled yet (see VKTODO below).
void VKStrictResourceTexture::createTexture(VKBackend &backend) {
    VkImageCreateInfo imageCI = vks::initializers::imageCreateInfo();
    imageCI.imageType = VK_IMAGE_TYPE_2D;
    imageCI.format = evalTexelFormatInternal(_gpuObject.getTexelFormat());
    imageCI.extent.width = _gpuObject.getWidth();
    imageCI.extent.height = _gpuObject.getHeight();
    imageCI.extent.depth = 1;
    imageCI.arrayLayers = _gpuObject.isArray() ? _gpuObject.getNumSlices() : 1;
    imageCI.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCI.tiling = VK_IMAGE_TILING_OPTIMAL;
    // Sampled in shaders; transfer destination for the staging-buffer copy in transfer().
    imageCI.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    //auto device = _backend.lock()->getContext().device->logicalDevice;
    // Create image for this attachment
    // (Legacy manual allocation path, replaced by the VMA call below.)
    /*VK_CHECK_RESULT(vkCreateImage(device, &imageCI, nullptr, &_texture));
    VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo();
    VkMemoryRequirements memReqs;
    vkGetImageMemoryRequirements(device, _texture, &memReqs);
    memAlloc.allocationSize = memReqs.size;
    memAlloc.memoryTypeIndex = _backend.lock()->getContext().device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, &_vkDeviceMemory));
    VK_CHECK_RESULT(vkBindImageMemory(device, _texture, _vkDeviceMemory, 0));*/
    // We need to lock mip data here so that it doesn't change or get deleted before transfer
    _transferData.mipLevels = _gpuObject.getNumMips();
    // NOTE(review): TransferData::width/height are uint16_t — dimensions above
    // 65535 would silently truncate; confirm upstream texture size limits.
    _transferData.width = _gpuObject.getWidth();
    _transferData.height = _gpuObject.getHeight();
    _transferData.buffer_size = 0;
    for (uint16_t sourceMip = 0; sourceMip < _transferData.mipLevels; ++sourceMip) {
        if (!_gpuObject.isStoredMipFaceAvailable(sourceMip)) {
            continue;
        }
        // VKTODO: iterate through faces?
        auto dim = _gpuObject.evalMipDimensions(sourceMip);
        auto mipData = _gpuObject.accessStoredMipFace(sourceMip, 0); // VKTODO: only one face for now
        auto mipSize = _gpuObject.getStoredMipFaceSize(sourceMip, 0);
        if (mipData) {
            // Record where this mip will live inside the single staging buffer
            // that transfer() creates; offsets are packed back-to-back.
            TransferData::Mip mip {};
            mip.offset = _transferData.buffer_size;
            mip.size = mipSize;
            mip.data = mipData;
            mip.width = dim.x;
            mip.height = dim.y;
            _transferData.buffer_size += mipSize;
            _transferData.mips.push_back(mip);
            // VKTODO auto texelFormat = evalTexelFormatInternal(_gpuObject.getStoredMipFormat());
            //return copyMipFaceLinesFromTexture(targetMip, face, dim, 0, texelFormat.internalFormat, texelFormat.format, texelFormat.type, mipSize, mipData->readData());
        } else {
            qCDebug(gpu_vk_logging) << "Missing mipData level=" << sourceMip << " face=" << 0/*(int)face*/ << " for texture " << _gpuObject.source().c_str();
        }
    }
    // NOTE(review): transfer() uses the position in `mips` as the VkImage mip
    // level; if a middle stored mip is unavailable (skipped above) the levels
    // shift and no longer match their source mips — confirm stored mips are
    // always contiguous from 0, otherwise this needs a guard.
    imageCI.mipLevels = _transferData.mips.size();
    VmaAllocationCreateInfo allocationCI = {};
    allocationCI.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    qDebug() << "storedSize: " << _gpuObject.getStoredSize();
    VK_CHECK_RESULT(vmaCreateImage(vks::Allocation::getAllocator(), &imageCI, &allocationCI, &_vkImage, &_vmaAllocation, nullptr));
}
// Uploads the mip data captured in createTexture() to _vkImage via a
// host-visible staging buffer, then transitions the image to
// SHADER_READ_ONLY_OPTIMAL. Submits and waits on a one-off command buffer from
// the transfer pool, so this call blocks until the copy completes.
void VKStrictResourceTexture::transfer(VKBackend &backend) {
    VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo();
    VkMemoryRequirements memReqs;
    auto device = backend.getContext().device;
    // From VKS
    // Use a separate command buffer for texture loading
    VkCommandBuffer copyCmd = device->createCommandBuffer(device->transferCommandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
    // Create a host-visible staging buffer that contains the raw image data
    VkBuffer stagingBuffer;
    VkDeviceMemory stagingMemory;
    VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo();
    // This buffer is used as a transfer source for the buffer copy
    bufferCreateInfo.size = _transferData.buffer_size;
    bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VK_CHECK_RESULT(vkCreateBuffer(device->logicalDevice, &bufferCreateInfo, nullptr, &stagingBuffer));
    // Get memory requirements for the staging buffer (alignment, memory type bits)
    vkGetBufferMemoryRequirements(device->logicalDevice, stagingBuffer, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    // Get memory type index for a host visible buffer
    memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &stagingMemory));
    VK_CHECK_RESULT(vkBindBufferMemory(device->logicalDevice, stagingBuffer, stagingMemory, 0));
    // Copy texture data into staging buffer
    // (offsets were laid out back-to-back by createTexture)
    uint8_t *data;
    VK_CHECK_RESULT(vkMapMemory(device->logicalDevice, stagingMemory, 0, memReqs.size, 0, (void **)&data));
    for (auto &mip : _transferData.mips) {
        // NOTE(review): copies mip.data->size() bytes but the layout was sized
        // with mip.size (getStoredMipFaceSize) — confirm these always agree,
        // otherwise adjacent mips could be overwritten.
        memcpy(data + mip.offset, mip.data->data(), mip.data->size());
    }
    vkUnmapMemory(device->logicalDevice, stagingMemory);
    // One buffer->image copy region per mip level; the region index doubles as
    // the destination mip level (see contiguity note in createTexture).
    std::vector<VkBufferImageCopy> bufferCopyRegions;
    for (size_t mipLevel = 0; mipLevel < _transferData.mips.size(); mipLevel++) {
        VkBufferImageCopy bufferCopyRegion = {};
        bufferCopyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        bufferCopyRegion.imageSubresource.mipLevel = mipLevel;
        bufferCopyRegion.imageSubresource.baseArrayLayer = 0;
        bufferCopyRegion.imageSubresource.layerCount = 1;
        bufferCopyRegion.imageExtent.width = _transferData.mips[mipLevel].width;
        bufferCopyRegion.imageExtent.height = _transferData.mips[mipLevel].height;
        bufferCopyRegion.imageExtent.depth = 1;
        bufferCopyRegion.bufferOffset = _transferData.mips[mipLevel].offset;
        bufferCopyRegions.push_back(bufferCopyRegion);
    }
    // Create optimal tiled target image
    // NOTE(review): imageCreateInfo below is currently unused — the image was
    // already created (via VMA) in createTexture() and the vkCreateImage path
    // that consumed it is commented out. Dead code; remove once the VMA path
    // is confirmed final.
    VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
    imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imageCreateInfo.format = evalTexelFormatInternal(_gpuObject.getTexelFormat());
    imageCreateInfo.mipLevels = _transferData.mips.size();
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imageCreateInfo.extent = { _transferData.width, _transferData.height, 1 };
    imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    // Ensure that the TRANSFER_DST bit is set for staging
    if (!(imageCreateInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT))
    {
        imageCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    /*VK_CHECK_RESULT(vkCreateImage(device->logicalDevice, &imageCreateInfo, nullptr, &_vkImage));
    vkGetImageMemoryRequirements(device->logicalDevice, _vkImage, &memReqs);
    memAllocInfo.allocationSize = memReqs.size;
    // VKTODO: Switch to VMA
    memAllocInfo.memoryTypeIndex = device->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    VK_CHECK_RESULT(vkAllocateMemory(device->logicalDevice, &memAllocInfo, nullptr, &deviceMemory));
    VK_CHECK_RESULT(vkBindImageMemory(device->logicalDevice, _vkImage, deviceMemory, 0));*/
    // All uploaded mips of the single color layer.
    VkImageSubresourceRange subresourceRange = {};
    subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subresourceRange.baseMipLevel = 0;
    subresourceRange.levelCount = _transferData.mips.size();
    subresourceRange.layerCount = 1;
    // Image barrier for optimal image (target)
    // Optimal image will be used as destination for the copy
    vks::tools::setImageLayout(
        copyCmd,
        _vkImage,
        VK_IMAGE_LAYOUT_UNDEFINED,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        subresourceRange);
    // Copy mip levels from staging buffer
    vkCmdCopyBufferToImage(
        copyCmd,
        stagingBuffer,
        _vkImage,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        bufferCopyRegions.size(),
        bufferCopyRegions.data()
    );
    // Change texture image layout to shader read after all mip levels have been copied
    _vkImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    vks::tools::setImageLayout(
        copyCmd,
        _vkImage,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        _vkImageLayout,
        subresourceRange);
    // Submits and waits for completion, then frees copyCmd.
    device->flushCommandBuffer(copyCmd, backend.getContext().transferQueue, device->transferCommandPool);
    // Clean up staging resources
    vkDestroyBuffer(device->logicalDevice, stagingBuffer, nullptr);
    vkFreeMemory(device->logicalDevice, stagingMemory, nullptr);
}
// Creates the sampler and image view used to expose the uploaded texture to
// shaders (consumed by getDescriptorImageInfo()).
// NOTE(review): maxLod = 0 and the view's levelCount = 1 restrict sampling to
// the base mip even though transfer() uploads all mips — presumably temporary
// (see VKTODOs); confirm before relying on mipmapped sampling.
void VKStrictResourceTexture::postTransfer(VKBackend &backend) {
    auto device = backend.getContext().device;
    // Create sampler
    VkSamplerCreateInfo samplerCreateInfo = {};
    samplerCreateInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    samplerCreateInfo.magFilter = VK_FILTER_LINEAR; // VKTODO
    samplerCreateInfo.minFilter = VK_FILTER_LINEAR; // VKTODO
    samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    samplerCreateInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    samplerCreateInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    samplerCreateInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    samplerCreateInfo.mipLodBias = 0.0f;
    samplerCreateInfo.compareOp = VK_COMPARE_OP_NEVER;
    samplerCreateInfo.minLod = 0.0f;
    samplerCreateInfo.maxLod = 0.0f;
    samplerCreateInfo.maxAnisotropy = 1.0f;
    VK_CHECK_RESULT(vkCreateSampler(device->logicalDevice, &samplerCreateInfo, nullptr, &_vkSampler));
    // Create image view
    VkImageViewCreateInfo viewCreateInfo = {};
    viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    viewCreateInfo.pNext = nullptr;
    viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    viewCreateInfo.format = evalTexelFormatInternal(_gpuObject.getTexelFormat());
    // { aspectMask, baseMipLevel, levelCount, baseArrayLayer, layerCount }
    viewCreateInfo.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    viewCreateInfo.subresourceRange.levelCount = 1; // redundant with the brace-init above
    viewCreateInfo.image = _vkImage;
    VK_CHECK_RESULT(vkCreateImageView(device->logicalDevice, &viewCreateInfo, nullptr, &_vkImageView));
};
/*Size VKTexture::copyMipFaceFromTexture(uint16_t sourceMip, uint16_t targetMip, uint8_t face) const {
if (!_gpuObject.isStoredMipFaceAvailable(sourceMip)) {
return 0;
@ -370,6 +591,14 @@ TransferJob::~TransferJob() {
Backend::texturePendingGPUTransferMemSize.update(_transferSize, 0);
}*/
// Packages this texture's sampler, image view, and current image layout for
// use in descriptor-set writes (combined image sampler).
VkDescriptorImageInfo VKStrictResourceTexture::getDescriptorImageInfo() {
    VkDescriptorImageInfo info{};
    info.imageView = _vkImageView;
    info.imageLayout = _vkImageLayout;
    info.sampler = _vkSampler;
    return info;
}
#if 0
#include "VKTexture.h"

View file

@ -25,7 +25,7 @@ public:
//static void initTextureTransferHelper();
//static std::shared_ptr<VKTextureTransferHelper> _textureTransferHelper;
template <typename VKTextureType>
/*template <typename VKTextureType>
static VKTextureType* sync(VKBackend& backend, const TexturePointer& texturePointer, bool needTransfer) {
const Texture& texture = *texturePointer;
if (!texture.isDefined()) {
@ -42,9 +42,9 @@ public:
// This automatically replaces any previous texture
object = new VKTextureType(backend.shared_from_this(), texture, needTransfer);
if (!object->_transferable) {
object->createTexture();
object->createTexture(backend);
object->_contentStamp = texture.getDataStamp();
object->postTransfer();
object->postTransfer(backend);
}
}
@ -55,7 +55,7 @@ public:
// If we just did a transfer, return the object after doing post-transfer work
if (VKSyncState::Transferred == object->getSyncState()) {
object->postTransfer();
object->postTransfer(backend);
return object;
}
@ -76,12 +76,13 @@ public:
}
return object;
}
}*/
template <typename VKTextureType>
static VkImage getHandle(VKBackend& backend, const TexturePointer& texture, bool shouldSync) {
/*template <typename VKTextureType>
static VkDescriptorImageInfo getDescriptorImageInfoFromTexture(VKBackend& backend, const TexturePointer& texture, bool shouldSync) {
if (!texture) {
return VK_NULL_HANDLE;
Q_ASSERT(false);
return backend.getDefaultTextureDescriptorInfo();
}
VKTextureType* object { nullptr };
if (shouldSync) {
@ -90,23 +91,27 @@ public:
object = Backend::getGPUObject<VKTextureType>(*texture);
}
if (!object) {
return VK_NULL_HANDLE;
Q_ASSERT(false);
return backend.getDefaultTextureDescriptorInfo();
}
VkImage result = object->_id;
VkDescriptorImageInfo result {};
//= object->_;
// VKTODO
// Don't return textures that are in transfer state
if (shouldSync) {
/*if (shouldSync) {
if ((object->getSyncState() != VKSyncState::Idle) ||
// Don't return transferable textures that have never completed transfer
(!object->_transferable || 0 != object->_transferCount)) {
// Will be either 0 or the original texture being downsampled.
Q_ASSERT(false);
result = object->_downsampleSource._texture;
}
}
}*/
return result;
}
return object->getDescriptorImageInfo();
}*/
// Used by derived classes and helpers to ensure the actual VK object exceeds the lifetime of `this`
/*VkImage takeOwnership() {
@ -117,7 +122,7 @@ public:
virtual ~VKTexture();
VkImage _texture { VK_NULL_HANDLE };
VkImage _vkImage{ VK_NULL_HANDLE };
//const Stamp _storageStamp;
const VkImageViewType _target;
//const uint16 _maxMip;
@ -129,7 +134,7 @@ public:
struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource(const std::weak_ptr<VKBackend>& backend) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(const std::weak_ptr<VKBackend>& backend) : _backend(backend), _size(0), _texture(VK_NULL_HANDLE), _minMip(0), _maxMip(0) {}
DownsampleSource(const std::weak_ptr<VKBackend>& backend, VKTexture* originalTexture) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) { Q_ASSERT(false); }; // VKTODO
~DownsampleSource() {}; // VKTODO
void reset() { _texture = VK_NULL_HANDLE; }
@ -159,7 +164,7 @@ public:
};
// Execute any post-move operations that must occur only on the main thread
virtual void postTransfer() = 0;
virtual void postTransfer(VKBackend &backend) = 0;
// VKTODO: can be done later
bool isOverMaxMemory() const { return false; };
@ -194,11 +199,27 @@ protected:
void setSyncState(VKSyncState syncState) { _syncState = syncState; }
//uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
virtual void createTexture() = 0;
virtual void createTexture(VKBackend &backend) = 0;
virtual VkDescriptorImageInfo getDescriptorImageInfo() = 0;
//virtual void allocateStorage() const = 0;
//virtual void updateSize() const = 0;
virtual void transfer() const = 0;
struct TransferData {
uint16_t mipLevels;
uint16_t width;
uint16_t height;
size_t buffer_size;
struct Mip {
size_t offset;
size_t size;
uint32_t width;
uint32_t height;
std::shared_ptr<const storage::Storage> data;
};
std::vector<Mip> mips;
};
TransferData _transferData{};
virtual void transfer(VKBackend &backend) = 0;
//virtual void syncSampler() const = 0;
// VKTODO
//virtual void generateMips() const = 0;
@ -218,7 +239,7 @@ private:
class VKFixedAllocationTexture : public VKTexture {
using Parent = VKTexture;
friend class GL45Backend;
friend class VKBackend;
public:
VKFixedAllocationTexture(const std::weak_ptr<VKBackend>& backend, const Texture& texture, bool isTransferable) :
@ -228,30 +249,61 @@ public:
protected:
Size size() const override { return _size; }
// VKTODO
//void allocateStorage() const;
//void allocateStorage();
// VKTODO
//void syncSampler() const override;
// VKTODO
//void updateSize() const override {};
VmaAllocation _vmaAllocation;
const Size _size{ 0 };
};
class VKAttachmentTexture : public VKFixedAllocationTexture {
using Parent = VKFixedAllocationTexture;
friend class VKBackend;
protected:
VKAttachmentTexture(const std::weak_ptr<VKBackend>& backend, const Texture& texture) :
VKFixedAllocationTexture(backend, texture, false) {
VKAttachmentTexture::createTexture();
VKAttachmentTexture::createTexture(*backend.lock());
};
virtual ~VKAttachmentTexture() {}; // VKTODO: delete image and image view, release memory
void createTexture() override;
void transfer() const override {}; // VKTODO
void postTransfer() override {}; // VKTODO
//VkImage vkImage { VK_NULL_HANDLE };
VkDeviceMemory _vkDeviceMemory{ VK_NULL_HANDLE };
void createTexture(VKBackend &backend) override;
void transfer(VKBackend &backend) override {}; // VKTODO
void postTransfer(VKBackend &backend) override {}; // VKTODO
VkDescriptorImageInfo getDescriptorImageInfo() override {
Q_ASSERT(false);
return {};
}; // VKTODO
//VkImage _vkImage { VK_NULL_HANDLE };
//VkDeviceMemory _vkDeviceMemory{ VK_NULL_HANDLE };
};
class VKStrictResourceTexture: public VKFixedAllocationTexture {
friend class VKBackend;
protected:
// VKTODO: how to handle mipmaps?
VKStrictResourceTexture(const std::weak_ptr<VKBackend>& backend, const Texture& texture) :
VKFixedAllocationTexture(backend, texture, true) {
VKBackend& vkBackend = *backend.lock();
VKStrictResourceTexture::createTexture(vkBackend);
// VKTODO: transfer on transfer tread
VKStrictResourceTexture::transfer(vkBackend);
VKStrictResourceTexture::postTransfer(vkBackend);
};
~VKStrictResourceTexture() override {}; // VKTODO: delete image and image view, release memory
void createTexture(VKBackend &backend) override;
void transfer(VKBackend &backend) override;
void postTransfer(VKBackend &backend) override;
VkDescriptorImageInfo getDescriptorImageInfo() override;
//VkImage _vkImage { VK_NULL_HANDLE };
VkImageView _vkImageView { VK_NULL_HANDLE };
VkImageLayout _vkImageLayout {}; // VKTODO
VkSampler _vkSampler { VK_NULL_HANDLE };
// This need to be moved to VKFixedAllocationTexture and allocated in allocateStorage()
//VkDeviceMemory _vkDeviceMemory{ VK_NULL_HANDLE };
};
} }

View file

@ -58,7 +58,7 @@ LogHandler::LogHandler() {
#endif
#ifdef HAS_JOURNALD
_useJournald = true;
_useJournald = false; // VKTODO
#endif
parseOptions(logOptions, "OVERTE_LOG_OPTIONS");

View file

@ -108,11 +108,14 @@ void Context::createInstance() {
qDebug() << "Found debug marker extension";
}
//requireExtensions({ VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME });
//requireExtensions({ VK_EXT_DEPTH_CLAMP_ZERO_ONE_EXTENSION_NAME });
// Vulkan instance
VkApplicationInfo appInfo{};
appInfo.pApplicationName = "VulkanExamples";
appInfo.pEngineName = "VulkanExamples";
appInfo.apiVersion = VK_API_VERSION_1_0;
appInfo.apiVersion = VK_API_VERSION_1_1;
std::set<std::string> instanceExtensions = { VK_KHR_SURFACE_EXTENSION_NAME };
@ -409,10 +412,20 @@ void Context::buildDevice() {
for (const auto& extension : requestedDeviceExtensions) {
enabledExtensions.push_back(extension.c_str());
}
enabledExtensions.push_back(VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME);
// Needed for OpenGL depth buffer compatibility
VkPhysicalDeviceDepthClipControlFeaturesEXT depthClipControl{};
depthClipControl.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT;
depthClipControl.depthClipControl = true;
void *pNextChain = &depthClipControl;
enabledFeatures.depthClamp = true;
Q_ASSERT(!device);
device.reset(new VulkanDevice(physicalDevice));
device->createLogicalDevice(enabledFeatures, enabledExtensions, nullptr, true,
device->createLogicalDevice(enabledFeatures, enabledExtensions, pNextChain, true,
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_COMPUTE_BIT);
}

View file

@ -78,10 +78,14 @@ namespace vks {
struct PipelineViewportStateCreateInfo : public VkPipelineViewportStateCreateInfo {
std::vector<VkViewport> viewports;
std::vector<VkRect2D> scissors;
VkPipelineViewportDepthClipControlCreateInfoEXT depthClipControlCI{};
PipelineViewportStateCreateInfo() :
VkPipelineViewportStateCreateInfo{} {
sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
depthClipControlCI.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT;
depthClipControlCI.negativeOneToOne = true;
pNext = &depthClipControlCI;
}
void update() {

View file

@ -260,6 +260,7 @@ namespace vks
// If the device will be used for presenting to a display via a swapchain we need to request the swapchain extension
deviceExtensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
//deviceExtensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
VkDeviceCreateInfo deviceCreateInfo = {};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;