Add Vulkan backend

commit 5bce2dfbf0
parent 7f9c3deefd
Brad Davis authored on 2018-11-10 09:47:54 -08:00; committed by Karol Suprynowicz
25 changed files with 3136 additions and 18 deletions

@@ -406,8 +406,14 @@ macro(AUTOSCRIBE_SHADER_LIBS)
configure_file(shaders.qrc.in ${CMAKE_CURRENT_BINARY_DIR}/shaders.qrc)
list(APPEND QT_RESOURCES_FILE ${CMAKE_CURRENT_BINARY_DIR}/shaders.qrc)
list(APPEND AUTOSCRIBE_SHADER_HEADERS ${AUTOSCRIBE_HEADER_DIR}/mono.glsl ${AUTOSCRIBE_HEADER_DIR}/stereo.glsl)
list(APPEND AUTOSCRIBE_SHADER_HEADERS ${AUTOSCRIBE_HEADER_DIR}/450/header.glsl ${AUTOSCRIBE_HEADER_DIR}/410/header.glsl ${AUTOSCRIBE_HEADER_DIR}/320es/header.glsl)
list(APPEND AUTOSCRIBE_SHADER_HEADERS
${AUTOSCRIBE_HEADER_DIR}/mono.glsl
${AUTOSCRIBE_HEADER_DIR}/stereo.glsl
${AUTOSCRIBE_HEADER_DIR}/450/header.glsl
${AUTOSCRIBE_HEADER_DIR}/410/header.glsl
${AUTOSCRIBE_HEADER_DIR}/320es/header.glsl
)
source_group("Shader Headers" FILES ${AUTOSCRIBE_HEADER_DIR}/mono.glsl ${AUTOSCRIBE_HEADER_DIR}/stereo.glsl)
source_group("Shader Headers\\450" FILES ${AUTOSCRIBE_HEADER_DIR}/450/header.glsl)
source_group("Shader Headers\\410" FILES ${AUTOSCRIBE_HEADER_DIR}/410/header.glsl)

libraries/gpu-vk/CMakeLists.txt

@@ -0,0 +1,6 @@
set(TARGET_NAME gpu-vk)
setup_hifi_library()
link_hifi_libraries(shared shaders vk gpu)
GroupSources("src")
target_vulkan()

File diff suppressed because it is too large

VKBackend.h

@@ -0,0 +1,141 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKBackend_h
#define hifi_gpu_vk_VKBackend_h
#include <assert.h>
#include <functional>
#include <memory>
#include <bitset>
#include <queue>
#include <utility>
#include <list>
#include <array>
#include <gpu/Forward.h>
#include <gpu/Context.h>
#include <vk/Config.h>
#include <vk/Context.h>
#include <vk/Debug.h>
#include "VKForward.h"
namespace gpu { namespace vulkan {
class VKBackend : public Backend, public std::enable_shared_from_this<VKBackend> {
// Context Backend static interface required
friend class gpu::Context;
static void init();
static BackendPointer createBackend();
public:
VKBackend();
~VKBackend();
void syncCache() override {}
void recycle() const override {}
void setCameraCorrection(const Mat4& correction, const Mat4& prevRenderView, bool reset = false) override {}
uint32_t getTextureID(const TexturePointer&) override { return 0; }
void executeFrame(const FramePointer& frame) final override;
bool isTextureManagementSparseEnabled() const override;
bool supportedTextureFormat(const gpu::Element& format) const override;
const std::string& getVersion() const override;
void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final override {}
void trash(const VKBuffer& buffer);
#if 0
// Draw Stage
virtual void do_draw(const Batch& batch, size_t paramOffset) final;
virtual void do_drawIndexed(const Batch& batch, size_t paramOffset) final;
virtual void do_drawInstanced(const Batch& batch, size_t paramOffset) final;
virtual void do_drawIndexedInstanced(const Batch& batch, size_t paramOffset) final;
virtual void do_multiDrawIndirect(const Batch& batch, size_t paramOffset) final;
virtual void do_multiDrawIndexedIndirect(const Batch& batch, size_t paramOffset) final;
// Input Stage
virtual void do_setInputFormat(const Batch& batch, size_t paramOffset) final;
virtual void do_setInputBuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_setIndexBuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_setIndirectBuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_generateTextureMips(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform1f(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform2f(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform3f(const Batch& batch, size_t paramOffset) final;
virtual void do_glUniform4f(const Batch& batch, size_t paramOffset) final;
virtual void do_glColor4f(const Batch& batch, size_t paramOffset) final;
// Transform Stage
virtual void do_setModelTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setProjectionJitter(const Batch& batch, size_t paramOffset) final;
virtual void do_setViewportTransform(const Batch& batch, size_t paramOffset) final;
virtual void do_setDepthRangeTransform(const Batch& batch, size_t paramOffset) final;
// Uniform Stage
virtual void do_setUniformBuffer(const Batch& batch, size_t paramOffset) final;
// Resource Stage
virtual void do_setResourceBuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_setResourceTexture(const Batch& batch, size_t paramOffset) final;
virtual void do_setResourceTextureTable(const Batch& batch, size_t paramOffset);
virtual void do_setResourceFramebufferSwapChainTexture(const Batch& batch, size_t paramOffset) final;
// Pipeline Stage
virtual void do_setPipeline(const Batch& batch, size_t paramOffset) final;
// Output stage
virtual void do_setFramebuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_setFramebufferSwapChain(const Batch& batch, size_t paramOffset) final;
virtual void do_clearFramebuffer(const Batch& batch, size_t paramOffset) final;
virtual void do_blit(const Batch& batch, size_t paramOffset) final;
virtual void do_advance(const Batch& batch, size_t paramOffset) final;
virtual void do_setStateBlendFactor(const Batch& batch, size_t paramOffset) final;
virtual void do_setStateScissorRect(const Batch& batch, size_t paramOffset) final;
// Query section
virtual void do_beginQuery(const Batch& batch, size_t paramOffset) final;
virtual void do_endQuery(const Batch& batch, size_t paramOffset) final;
virtual void do_getQuery(const Batch& batch, size_t paramOffset) final;
// Reset stages
virtual void do_resetStages(const Batch& batch, size_t paramOffset) final;
virtual void do_disableContextViewCorrection(const Batch& batch, size_t paramOffset) final;
virtual void do_restoreContextViewCorrection(const Batch& batch, size_t paramOffset) final;
virtual void do_disableContextStereo(const Batch& batch, size_t paramOffset) final;
virtual void do_restoreContextStereo(const Batch& batch, size_t paramOffset) final;
// Other
virtual void do_runLambda(const Batch& batch, size_t paramOffset) final;
virtual void do_startNamedCall(const Batch& batch, size_t paramOffset) final;
virtual void do_stopNamedCall(const Batch& batch, size_t paramOffset) final;
// Performance profiling markers
virtual void do_pushProfileRange(const Batch& batch, size_t paramOffset) final;
virtual void do_popProfileRange(const Batch& batch, size_t paramOffset) final;
#endif
protected:
// vk::Pipeline cache object
vk::PipelineCache _pipelineCache;
vks::Context& _context{ vks::Context::get() };
const vk::PhysicalDeviceProperties& _deviceProperties { _context.deviceProperties };
const vk::PhysicalDeviceFeatures& _deviceFeatures { _context.deviceFeatures };
const vk::PhysicalDeviceMemoryProperties& _memoryProperties { _context.deviceMemoryProperties };
// Logical device: the application's view of the physical device (GPU)
const vk::Device& _device{ _context.device };
const vk::Queue _graphicsQueue;
const vk::Queue _transferQueue;
friend class VKBuffer;
};
}} // namespace gpu::vulkan
#endif

VKBuffer.cpp

@@ -0,0 +1,58 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "VKBuffer.h"
#include "VKBackend.h"
using namespace gpu::vulkan;
VKBuffer* VKBuffer::sync(VKBackend& backend, const gpu::Buffer& buffer) {
if (buffer.getSysmem().getSize() != 0) {
if (buffer._getUpdateCount == 0) {
qWarning() << "Unsynced buffer";
}
if (buffer._getUpdateCount < buffer._applyUpdateCount) {
qWarning() << "Unsynced buffer " << buffer._getUpdateCount << " " << buffer._applyUpdateCount;
}
}
VKBuffer* object = gpu::Backend::getGPUObject<VKBuffer>(buffer);
// Has the storage size changed?
if (!object || object->_stamp != buffer._renderSysmem.getStamp()) {
object = new VKBuffer(backend, buffer);
}
return object;
}
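// Usage sketch (call-site names hypothetical): backend command implementations
// would resolve the native handle just before recording it, e.g.
//   vk::Buffer handle = VKBuffer::getBuffer(backend, *bufferPointer);
// sync() only re-creates the backend object when the storage stamp changes.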
vk::Buffer VKBuffer::getBuffer(VKBackend& backend, const gpu::Buffer& buffer) {
VKBuffer* bo = sync(backend, buffer);
if (bo) {
return bo->buffer;
} else {
return nullptr;
}
}
VKBuffer::VKBuffer(VKBackend& backend, const gpu::Buffer& buffer) : VKObject(backend, buffer) {
vk::BufferUsageFlags usageFlags{ (VkBufferUsageFlags)buffer.getUsage() };
(vks::Buffer&)(*this) =
backend._context.createBuffer(usageFlags, buffer.getSize(), vk::MemoryPropertyFlagBits::eDeviceLocal);
Backend::bufferCount.increment();
Backend::bufferGPUMemSize.update(0, size);
}
VKBuffer::~VKBuffer() {
Backend::bufferGPUMemSize.update(size, 0);
Backend::bufferCount.decrement();
auto backend = _backend.lock();
if (backend) {
backend->trash(*this);
}
}

VKBuffer.h

@@ -0,0 +1,30 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKBuffer_h
#define hifi_gpu_vk_VKBuffer_h
#include "VKForward.h"
#include "VKShared.h"
#include <vk/Buffer.h>
namespace gpu { namespace vulkan {
class VKBuffer : public VKObject<gpu::Buffer>, public vks::Buffer {
public:
static VKBuffer* sync(VKBackend& backend, const gpu::Buffer& buffer);
static ::vk::Buffer getBuffer(VKBackend& backend, const gpu::Buffer& buffer);
~VKBuffer();
protected:
VKBuffer(VKBackend& backend, const gpu::Buffer& buffer);
const Stamp _stamp{ 0 };
};
} }
#endif

VKForward.h

@@ -0,0 +1,30 @@
//
// Created by Bradley Austin Davis on 2016/09/22
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_VKForward_h
#define hifi_gpu_VKForward_h
#include <vk/Config.h>
#include <gpu/Forward.h>
namespace gpu { namespace vulkan {
class VKBuffer;
class VKShader;
class VKTexture;
class VKBackend;
class VKFramebuffer;
class VKPipeline;
class VKQuery;
} }
#endif

VKFramebuffer.cpp

@@ -0,0 +1,49 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include "VKFramebuffer.h"
#include "VKBackend.h"
using namespace gpu;
using namespace gpu::gl;
VKFramebuffer::~VKFramebuffer() {
if (_id) {
auto backend = _backend.lock();
if (backend) {
backend->releaseFramebuffer(_id);
}
}
}
bool VKFramebuffer::checkStatus(VKenum target) const {
bool result = false;
switch (_status) {
case VK_FRAMEBUFFER_COMPLETE:
// Success !
result = true;
break;
case VK_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
qCDebug(gpu_vk_logging) << "VKFramebuffer::syncGPUObject : Framebuffer not valid, VK_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
break;
case VK_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
qCDebug(gpu_vk_logging) << "VKFramebuffer::syncGPUObject : Framebuffer not valid, VK_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
break;
case VK_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
qCDebug(gpu_vk_logging) << "VKFramebuffer::syncGPUObject : Framebuffer not valid, VK_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
break;
case VK_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
qCDebug(gpu_vk_logging) << "VKFramebuffer::syncGPUObject : Framebuffer not valid, VK_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
break;
case VK_FRAMEBUFFER_UNSUPPORTED:
qCDebug(gpu_vk_logging) << "VKFramebuffer::syncGPUObject : Framebuffer not valid, VK_FRAMEBUFFER_UNSUPPORTED.";
break;
}
return result;
}
#endif

VKFramebuffer.h

@@ -0,0 +1,77 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKFramebuffer_h
#define hifi_gpu_vk_VKFramebuffer_h
#include "VKShared.h"
#include "VKBackend.h"
namespace gpu { namespace vk {
class VKFramebuffer : public VKObject<Framebuffer> {
public:
template <typename VKFramebufferType>
static VKFramebufferType* sync(VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebufferType* object = Backend::getGPUObject<VKFramebufferType>(framebuffer);
bool needsUpdate { false };
if (!object ||
framebuffer.getDepthStamp() != object->_depthStamp ||
framebuffer.getColorStamps() != object->_colorStamps) {
needsUpdate = true;
}
// If GPU object already created and in sync
if (!needsUpdate) {
return object;
} else if (framebuffer.isEmpty()) {
// NO framebuffer definition yet so let's avoid thinking
return nullptr;
}
// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Framebuffer
object = new VKFramebufferType(backend.shared_from_this(), framebuffer);
Backend::setGPUObject(framebuffer, object);
(void)CHECK_VK_ERROR();
}
object->update();
return object;
}
template <typename VKFramebufferType>
static VKuint getId(VKBackend& backend, const Framebuffer& framebuffer) {
VKFramebufferType* fbo = sync<VKFramebufferType>(backend, framebuffer);
if (fbo) {
return fbo->_id;
} else {
return 0;
}
}
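// Usage sketch (VKFramebufferImpl is hypothetical): a concrete backend would
// resolve and bind a render target with, e.g.
//   VKuint fbo = VKFramebuffer::getId<VKFramebufferImpl>(backend, *framebufferPointer);
// where VKFramebufferImpl implements update() for its attachment layout.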
const VKuint& _fbo { _id };
std::vector<VKenum> _colorBuffers;
Stamp _depthStamp { 0 };
std::vector<Stamp> _colorStamps;
protected:
VKenum _status { VK_FRAMEBUFFER_COMPLETE };
virtual void update() = 0;
bool checkStatus(VKenum target) const;
VKFramebuffer(const std::weak_ptr<VKBackend>& backend, const Framebuffer& framebuffer, VKuint id) : VKObject(backend, framebuffer, id) {}
~VKFramebuffer();
};
} }
#endif

VKPipeline.cpp

@@ -0,0 +1,60 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include "VKPipeline.h"
#include "VKShader.h"
#include "VKState.h"
using namespace gpu;
using namespace gpu::gl;
VKPipeline* VKPipeline::sync(VKBackend& backend, const Pipeline& pipeline) {
VKPipeline* object = Backend::getGPUObject<VKPipeline>(pipeline);
// If GPU object already created then good
if (object) {
return object;
}
// No object allocated yet, let's see if it's worth it...
ShaderPointer shader = pipeline.getProgram();
// If this pipeline's shader has already failed to compile, don't try again
if (shader->compilationHasFailed()) {
return nullptr;
}
VKShader* programObject = VKShader::sync(backend, *shader);
if (programObject == nullptr) {
shader->setCompilationHasFailed(true);
return nullptr;
}
StatePointer state = pipeline.getState();
VKState* stateObject = VKState::sync(*state);
if (stateObject == nullptr) {
return nullptr;
}
// Program and state are valid, we can create the pipeline object
if (!object) {
object = new VKPipeline();
Backend::setGPUObject(pipeline, object);
}
// Special case for view correction matrices, any pipeline that declares the correction buffer
// uniform will automatically have it provided without any client code necessary.
// Required for stable lighting in the HMD.
object->_cameraCorrection = shader->getBuffers().findLocation("cameraCorrectionBuffer");
object->_program = programObject;
object->_state = stateObject;
return object;
}
#endif

VKPipeline.h

@@ -0,0 +1,29 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKPipeline_h
#define hifi_gpu_vk_VKPipeline_h
#include "VKShared.h"
namespace gpu { namespace vk {
class VKPipeline : public GPUObject {
public:
static VKPipeline* sync(VKBackend& backend, const Pipeline& pipeline);
VKShader* _program { nullptr };
VKState* _state { nullptr };
// Bit of a hack, any pipeline can need the camera correction buffer at execution time, so
// we store whether a given pipeline has declared the uniform buffer for it.
int32 _cameraCorrection { -1 };
};
} }
#endif

VKQuery.h

@@ -0,0 +1,64 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKQuery_h
#define hifi_gpu_vk_VKQuery_h
#include "VKShared.h"
#include "VKBackend.h"
namespace gpu { namespace vk {
class VKQuery : public VKObject<Query> {
using Parent = gpu::vk::VKObject<Query>;
public:
template <typename VKQueryType>
static VKQueryType* sync(VKBackend& backend, const Query& query) {
VKQueryType* object = Backend::getGPUObject<VKQueryType>(query);
// need to have a gpu object?
if (!object) {
// All is green, assign the gpuobject to the Query
object = new VKQueryType(backend.shared_from_this(), query);
(void)CHECK_VK_ERROR();
Backend::setGPUObject(query, object);
}
return object;
}
template <typename VKQueryType>
static VKuint getId(VKBackend& backend, const QueryPointer& query) {
if (!query) {
return 0;
}
VKQuery* object = sync<VKQueryType>(backend, *query);
if (!object) {
return 0;
}
return object->_endqo;
}
const VKuint& _endqo = { _id };
const VKuint _beginqo = { 0 };
VKuint64 _result { (VKuint64)-1 };
protected:
VKQuery(const std::weak_ptr<VKBackend>& backend, const Query& query, VKuint endId, VKuint beginId) : Parent(backend, query, endId), _beginqo(beginId) {}
~VKQuery() {
if (_id) {
VKuint ids[2] = { _endqo, _beginqo };
glDeleteQueries(2, ids);
}
}
};
} }
#endif

VKShader.cpp

@@ -0,0 +1,192 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include "VKShader.h"
#include "VKBackend.h"
using namespace gpu;
using namespace gpu::gl;
VKShader::VKShader(const std::weak_ptr<VKBackend>& backend) : _backend(backend) {
}
VKShader::~VKShader() {
for (auto& so : _shaderObjects) {
auto backend = _backend.lock();
if (backend) {
if (so.glshader != 0) {
backend->releaseShader(so.glshader);
}
if (so.glprogram != 0) {
backend->releaseProgram(so.glprogram);
}
}
}
}
// VKSL version
static const std::string glslVersion {
"#version 410 core"
};
// Shader domain
static const size_t NUM_SHADER_DOMAINS = 3;
// VK Shader type enums
// Must match the order of type specified in gpu::Shader::Type
static const std::array<VKenum, NUM_SHADER_DOMAINS> SHADER_DOMAINS { {
VK_VERTEX_SHADER,
VK_FRAGMENT_SHADER,
VK_GEOMETRY_SHADER,
} };
// Domain specific defines
// Must match the order of type specified in gpu::Shader::Type
static const std::array<std::string, NUM_SHADER_DOMAINS> DOMAIN_DEFINES { {
"#define GPU_VERTEX_SHADER",
"#define GPU_PIXEL_SHADER",
"#define GPU_GEOMETRY_SHADER",
} };
// Versions specific of the shader
static const std::array<std::string, VKShader::NumVersions> VERSION_DEFINES { {
""
} };
VKShader* compileBackendShader(VKBackend& backend, const Shader& shader) {
// Any VKSL program? Normally yes...
const std::string& shaderSource = shader.getSource().getCode();
VKenum shaderDomain = SHADER_DOMAINS[shader.getType()];
VKShader::ShaderObjects shaderObjects;
for (int version = 0; version < VKShader::NumVersions; version++) {
auto& shaderObject = shaderObjects[version];
std::string shaderDefines = glslVersion + "\n" + DOMAIN_DEFINES[shader.getType()] + "\n" + VERSION_DEFINES[version];
bool result = compileShader(shaderDomain, shaderSource, shaderDefines, shaderObject.glshader, shaderObject.glprogram);
if (!result) {
return nullptr;
}
}
// So far so good, the shader is created successfully
VKShader* object = new VKShader(backend.shared_from_this());
object->_shaderObjects = shaderObjects;
return object;
}
VKShader* compileBackendProgram(VKBackend& backend, const Shader& program) {
if (!program.isProgram()) {
return nullptr;
}
VKShader::ShaderObjects programObjects;
for (int version = 0; version < VKShader::NumVersions; version++) {
auto& programObject = programObjects[version];
// Let's go through every shader and make sure they are ready to go
std::vector< VKuint > shaderGLObjects;
for (auto subShader : program.getShaders()) {
auto object = VKShader::sync(backend, *subShader);
if (object) {
shaderGLObjects.push_back(object->_shaderObjects[version].glshader);
} else {
qCDebug(gpu_vk_logging) << "VKShader::compileBackendProgram - One of the shaders of the program is not compiled?";
return nullptr;
}
}
VKuint glprogram = compileProgram(shaderGLObjects);
if (glprogram == 0) {
return nullptr;
}
programObject.glprogram = glprogram;
makeProgramBindings(programObject);
}
// So far so good, the program versions have all been created successfully
VKShader* object = new VKShader(backend.shared_from_this());
object->_shaderObjects = programObjects;
return object;
}
VKShader* VKShader::sync(VKBackend& backend, const Shader& shader) {
VKShader* object = Backend::getGPUObject<VKShader>(shader);
// If GPU object already created then good
if (object) {
return object;
}
// need to have a gpu object?
if (shader.isProgram()) {
VKShader* tempObject = compileBackendProgram(backend, shader);
if (tempObject) {
object = tempObject;
Backend::setGPUObject(shader, object);
}
} else if (shader.isDomain()) {
VKShader* tempObject = compileBackendShader(backend, shader);
if (tempObject) {
object = tempObject;
Backend::setGPUObject(shader, object);
}
}
glFinish();
return object;
}
bool VKShader::makeProgram(VKBackend& backend, Shader& shader, const Shader::BindingSet& slotBindings) {
// First make sure the Shader has been compiled
VKShader* object = sync(backend, shader);
if (!object) {
return false;
}
// Apply bindings to all program versions and generate list of slots from default version
for (int version = 0; version < VKShader::NumVersions; version++) {
auto& shaderObject = object->_shaderObjects[version];
if (shaderObject.glprogram) {
Shader::SlotSet buffers;
makeUniformBlockSlots(shaderObject.glprogram, slotBindings, buffers);
Shader::SlotSet uniforms;
Shader::SlotSet textures;
Shader::SlotSet samplers;
makeUniformSlots(shaderObject.glprogram, slotBindings, uniforms, textures, samplers);
Shader::SlotSet inputs;
makeInputSlots(shaderObject.glprogram, slotBindings, inputs);
Shader::SlotSet outputs;
makeOutputSlots(shaderObject.glprogram, slotBindings, outputs);
// Define the public slots only from the default version
if (version == 0) {
shader.defineSlots(uniforms, buffers, textures, samplers, inputs, outputs);
} else {
VKShader::UniformMapping mapping;
for (auto srcUniform : shader.getUniforms()) {
mapping[srcUniform._location] = uniforms.findLocation(srcUniform._name);
}
object->_uniformMappings.push_back(mapping);
}
}
}
return true;
}
#endif

VKShader.h

@@ -0,0 +1,53 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKShader_h
#define hifi_gpu_vk_VKShader_h
#include "VKShared.h"
namespace gpu { namespace vk {
class VKShader : public GPUObject {
public:
static VKShader* sync(VKBackend& backend, const Shader& shader);
static bool makeProgram(VKBackend& backend, Shader& shader, const Shader::BindingSet& slotBindings);
enum Version {
Mono = 0,
NumVersions
};
using ShaderObject = gpu::vk::ShaderObject;
using ShaderObjects = std::array< ShaderObject, NumVersions >;
using UniformMapping = std::map<VKint, VKint>;
using UniformMappingVersions = std::vector<UniformMapping>;
VKShader(const std::weak_ptr<VKBackend>& backend);
~VKShader();
ShaderObjects _shaderObjects;
UniformMappingVersions _uniformMappings;
VKuint getProgram(Version version = Mono) const {
return _shaderObjects[version].glprogram;
}
VKint getUniformLocation(VKint srcLoc, Version version = Mono) {
// THIS will be used in the future PR as we grow the number of versions
// return _uniformMappings[version][srcLoc];
return srcLoc;
}
const std::weak_ptr<VKBackend> _backend;
};
} }
#endif

VKShared.cpp

@@ -0,0 +1,366 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "VKShared.h"
Q_LOGGING_CATEGORY(gpu_vk_logging, "hifi.gpu.vk")
Q_LOGGING_CATEGORY(trace_gpu_vk, "trace.gpu.vk")
Q_LOGGING_CATEGORY(trace_gpu_vk_detail, "trace.gpu.vk.detail")
vk::Format gpu::vulkan::evalTexelFormatInternal(const gpu::Element& dstFormat) {
vk::Format result = vk::Format::eR8G8B8Unorm;
switch (dstFormat.getDimension()) {
case gpu::SCALAR:
{
switch (dstFormat.getSemantic()) {
case gpu::RED:
case gpu::RGB:
case gpu::RGBA:
case gpu::SRGB:
case gpu::SRGBA:
switch (dstFormat.getType()) {
case gpu::UINT32:
result = vk::Format::eR32Uint;
break;
case gpu::INT32:
result = vk::Format::eR32Sint;
break;
case gpu::NUINT32:
result = vk::Format::eR8Unorm;
break;
case gpu::NINT32:
result = vk::Format::eR8Snorm;
break;
case gpu::FLOAT:
result = vk::Format::eR32Sfloat;
break;
case gpu::UINT16:
result = vk::Format::eR16Uint;
break;
case gpu::INT16:
result = vk::Format::eR16Sint;
break;
case gpu::NUINT16:
result = vk::Format::eR16Unorm;
break;
case gpu::NINT16:
result = vk::Format::eR16Snorm;
break;
case gpu::HALF:
result = vk::Format::eR16Sfloat;
break;
case gpu::UINT8:
result = vk::Format::eR8Uint;
break;
case gpu::INT8:
result = vk::Format::eR8Sint;
break;
case gpu::NUINT8:
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
result = vk::Format::eR8Srgb;
} else {
result = vk::Format::eR8Unorm;
}
break;
case gpu::NINT8:
result = vk::Format::eR8Snorm;
break;
default:
Q_UNREACHABLE();
break;
}
break;
case gpu::R11G11B10:
// the type should be float
result = vk::Format::eB10G11R11UfloatPack32;
break;
case gpu::RGB9E5:
// the type should be float
result = vk::Format::eE5B9G9R9UfloatPack32;
break;
case gpu::DEPTH:
result = vk::Format::eD32Sfloat;
switch (dstFormat.getType()) {
case gpu::UINT32:
case gpu::INT32:
case gpu::NUINT32:
case gpu::NINT32:
case gpu::FLOAT:
result = vk::Format::eD32Sfloat;
break;
case gpu::UINT16:
case gpu::INT16:
case gpu::NUINT16:
case gpu::NINT16:
case gpu::HALF:
result = vk::Format::eD16Unorm;
break;
case gpu::UINT8:
case gpu::INT8:
case gpu::NUINT8:
case gpu::NINT8:
default:
Q_UNREACHABLE();
break;
}
break;
case gpu::DEPTH_STENCIL:
result = vk::Format::eD24UnormS8Uint;
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC2:
{
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
case gpu::XY:
case gpu::XYZ:
case gpu::UV:
switch (dstFormat.getType()) {
case gpu::UINT32:
result = vk::Format::eR32G32Uint;
break;
case gpu::INT32:
result = vk::Format::eR32G32Sint;
break;
case gpu::FLOAT:
result = vk::Format::eR32G32Sfloat;
break;
case gpu::UINT16:
result = vk::Format::eR16G16Uint;
break;
case gpu::INT16:
result = vk::Format::eR16G16Sint;
break;
case gpu::NUINT16:
result = vk::Format::eR16G16Unorm;
break;
case gpu::NINT16:
result = vk::Format::eR16G16Snorm;
break;
case gpu::HALF:
result = vk::Format::eR16G16Sfloat;
break;
case gpu::UINT8:
result = vk::Format::eR8G8Uint;
break;
case gpu::INT8:
result = vk::Format::eR8G8Sint;
break;
case gpu::NUINT8:
result = vk::Format::eR8G8Unorm;
break;
case gpu::NINT8:
result = vk::Format::eR8G8Snorm;
break;
case gpu::NUINT32:
case gpu::NINT32:
case gpu::NUINT2:
case gpu::NINT2_10_10_10:
case gpu::COMPRESSED:
case gpu::NUM_TYPES: // quiet compiler
Q_UNREACHABLE();
}
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC3:
{
switch (dstFormat.getSemantic()) {
case gpu::RGB:
case gpu::RGBA:
result = vk::Format::eR8G8B8Unorm;
break;
case gpu::SRGB:
case gpu::SRGBA:
result = vk::Format::eR8G8B8Srgb; // standard 2.2 gamma correction color
break;
case gpu::XYZ:
switch (dstFormat.getType()) {
case gpu::FLOAT:
result = vk::Format::eR32G32B32Sfloat;
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
}
case gpu::VEC4:
{
switch (dstFormat.getSemantic()) {
case gpu::RGB:
result = vk::Format::eR8G8B8Unorm;
break;
case gpu::RGBA:
case gpu::XYZW:
switch (dstFormat.getType()) {
case gpu::UINT32:
result = vk::Format::eR32G32B32A32Uint;
break;
case gpu::INT32:
result = vk::Format::eR32G32B32A32Sint;
break;
case gpu::FLOAT:
result = vk::Format::eR32G32B32A32Sfloat;
break;
case gpu::UINT16:
result = vk::Format::eR16G16B16A16Uint;
break;
case gpu::INT16:
result = vk::Format::eR16G16B16A16Sint;
break;
case gpu::NUINT16:
result = vk::Format::eR16G16B16A16Unorm;
break;
case gpu::NINT16:
result = vk::Format::eR16G16B16A16Snorm;
break;
case gpu::HALF:
result = vk::Format::eR16G16B16A16Sfloat;
break;
case gpu::UINT8:
result = vk::Format::eR8G8B8A8Uint;
break;
case gpu::INT8:
result = vk::Format::eR8G8B8A8Sint;
break;
case gpu::NUINT8:
result = vk::Format::eR8G8B8A8Unorm;
break;
case gpu::NINT8:
result = vk::Format::eR8G8B8A8Snorm;
break;
case gpu::NINT2_10_10_10:
result = vk::Format::eA2B10G10R10SnormPack32;
break;
case gpu::NUINT32:
case gpu::NINT32:
case gpu::NUINT2:
case gpu::COMPRESSED:
case gpu::NUM_TYPES: // quiet compiler
Q_UNREACHABLE();
}
break;
case gpu::SRGB:
result = vk::Format::eR8G8B8Srgb; // standard 2.2 gamma correction color
break;
case gpu::SRGBA:
result = vk::Format::eR8G8B8A8Srgb; // standard 2.2 gamma correction color
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
}
case gpu::TILE4x4:
{
switch (dstFormat.getSemantic()) {
case gpu::COMPRESSED_BC4_RED:
result = vk::Format::eBc4UnormBlock;
break;
case gpu::COMPRESSED_BC1_SRGB:
result = vk::Format::eBc1RgbSrgbBlock;
break;
case gpu::COMPRESSED_BC1_SRGBA:
result = vk::Format::eBc1RgbaSrgbBlock;
break;
case gpu::COMPRESSED_BC3_SRGBA:
result = vk::Format::eBc3SrgbBlock;
break;
case gpu::COMPRESSED_BC5_XY:
result = vk::Format::eBc5UnormBlock;
break;
case gpu::COMPRESSED_BC6_RGB:
result = vk::Format::eBc6HUfloatBlock;
break;
case gpu::COMPRESSED_BC7_SRGBA:
result = vk::Format::eBc7SrgbBlock;
break;
case gpu::COMPRESSED_ETC2_RGB:
result = vk::Format::eEtc2R8G8B8UnormBlock;
break;
case gpu::COMPRESSED_ETC2_SRGB:
result = vk::Format::eEtc2R8G8B8SrgbBlock;
break;
case gpu::COMPRESSED_ETC2_RGB_PUNCHTHROUGH_ALPHA:
result = vk::Format::eEtc2R8G8B8A1UnormBlock;
break;
case gpu::COMPRESSED_ETC2_SRGB_PUNCHTHROUGH_ALPHA:
result = vk::Format::eEtc2R8G8B8A1SrgbBlock;
break;
case gpu::COMPRESSED_ETC2_RGBA:
result = vk::Format::eEtc2R8G8B8A8UnormBlock;
break;
case gpu::COMPRESSED_ETC2_SRGBA:
result = vk::Format::eEtc2R8G8B8A8SrgbBlock;
break;
case gpu::COMPRESSED_EAC_RED:
result = vk::Format::eEacR11UnormBlock;
break;
case gpu::COMPRESSED_EAC_RED_SIGNED:
result = vk::Format::eEacR11SnormBlock;
break;
case gpu::COMPRESSED_EAC_XY:
result = vk::Format::eEacR11G11UnormBlock;
break;
case gpu::COMPRESSED_EAC_XY_SIGNED:
result = vk::Format::eEacR11G11SnormBlock;
break;
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
break;
}
default:
qCWarning(gpu_vk_logging) << "Unknown combination of texel format";
}
return result;
}
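// Example mappings implied by the switch above (a sketch; gpu::Element argument
// order assumed to be dimension, type, semantic):
//   gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::RGBA)   -> vk::Format::eR8G8B8A8Unorm
//   gpu::Element(gpu::VEC4, gpu::NUINT8, gpu::SRGBA)  -> vk::Format::eR8G8B8A8Srgb
//   gpu::Element(gpu::SCALAR, gpu::FLOAT, gpu::DEPTH) -> vk::Format::eD32Sfloat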
bool gpu::vulkan::isDepthStencilFormat(vk::Format format) {
switch (format) {
case vk::Format::eD16Unorm:
case vk::Format::eX8D24UnormPack32:
case vk::Format::eD32Sfloat:
case vk::Format::eS8Uint:
case vk::Format::eD16UnormS8Uint:
case vk::Format::eD24UnormS8Uint:
case vk::Format::eD32SfloatS8Uint:
return true;
default:
break;
}
return false;
}

VKShared.h

@@ -0,0 +1,126 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_VKShared_h
#define hifi_gpu_VKShared_h
#include <vk/Config.h>
#include <gpu/Forward.h>
#include <gpu/Format.h>
#include <gpu/Context.h>
Q_DECLARE_LOGGING_CATEGORY(gpu_vk_logging)
Q_DECLARE_LOGGING_CATEGORY(trace_gpu_vk)
Q_DECLARE_LOGGING_CATEGORY(trace_gpu_vk_detail)
namespace gpu { namespace vulkan {
gpu::Size getDedicatedMemory();
ComparisonFunction comparisonFuncFromGL(vk::CompareOp func);
State::StencilOp stencilOpFromGL(vk::StencilOp stencilOp);
State::BlendOp blendOpFromGL(vk::BlendOp blendOp);
State::BlendArg blendArgFromGL(vk::BlendFactor blendArg);
struct ShaderObject {
vk::ShaderModule glshader{ nullptr };
int32_t transformCameraSlot { -1 };
int32_t transformObjectSlot { -1 };
};
vk::Format evalTexelFormatInternal(const Element& dstFormat);
bool isDepthStencilFormat(vk::Format format);
static const vk::BlendOp BLEND_OPS_TO_VK[State::NUM_BLEND_OPS] = {
vk::BlendOp::eAdd,
vk::BlendOp::eSubtract,
vk::BlendOp::eReverseSubtract,
vk::BlendOp::eMin,
vk::BlendOp::eMax
};
static const vk::BlendFactor BLEND_ARGS_TO_VK[State::NUM_BLEND_ARGS] = {
vk::BlendFactor::eZero,
vk::BlendFactor::eOne,
vk::BlendFactor::eSrcColor,
vk::BlendFactor::eOneMinusSrcColor,
vk::BlendFactor::eSrcAlpha,
vk::BlendFactor::eOneMinusSrcAlpha,
vk::BlendFactor::eDstAlpha,
vk::BlendFactor::eOneMinusDstAlpha,
vk::BlendFactor::eDstColor,
vk::BlendFactor::eOneMinusDstColor,
vk::BlendFactor::eSrcAlphaSaturate,
vk::BlendFactor::eConstantColor,
vk::BlendFactor::eOneMinusConstantColor,
vk::BlendFactor::eConstantAlpha,
vk::BlendFactor::eOneMinusConstantAlpha,
};
static const vk::CompareOp COMPARISON_TO_VK[gpu::NUM_COMPARISON_FUNCS] = {
vk::CompareOp::eNever,
vk::CompareOp::eLess,
vk::CompareOp::eEqual,
vk::CompareOp::eLessOrEqual,
vk::CompareOp::eGreater,
vk::CompareOp::eNotEqual,
vk::CompareOp::eGreaterOrEqual,
vk::CompareOp::eAlways
};
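// These tables are indexed by the engine-side enum value; assuming the gpu
// comparison enum is ordered NEVER, LESS, EQUAL, LESS_EQUAL, ... as the table
// suggests, e.g. COMPARISON_TO_VK[gpu::LESS_EQUAL] == vk::CompareOp::eLessOrEqual.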
static const vk::PrimitiveTopology PRIMITIVE_TO_VK[gpu::NUM_PRIMITIVES] = {
vk::PrimitiveTopology::ePointList,
vk::PrimitiveTopology::eLineList,
vk::PrimitiveTopology::eLineStrip,
vk::PrimitiveTopology::eTriangleList,
vk::PrimitiveTopology::eTriangleStrip,
vk::PrimitiveTopology::eTriangleFan,
};
//static const VKenum ELEMENT_TYPE_TO_VK[gpu::NUM_TYPES] = {
// VK_FLOAT,
// VK_INT,
// VK_UNSIGNED_INT,
// VK_HALF_FLOAT,
// VK_SHORT,
// VK_UNSIGNED_SHORT,
// VK_BYTE,
// VK_UNSIGNED_BYTE,
// // Normalized values
// VK_INT,
// VK_UNSIGNED_INT,
// VK_SHORT,
// VK_UNSIGNED_SHORT,
// VK_BYTE,
// VK_UNSIGNED_BYTE
//};
bool checkGLError(const char* name = nullptr);
bool checkGLErrorDebug(const char* name = nullptr);
class VKBackend;
template <typename GPUType>
struct VKObject : public GPUObject {
public:
VKObject(VKBackend& backend, const GPUType& gpuObject) : _gpuObject(gpuObject), _backend(backend.shared_from_this()) {}
virtual ~VKObject() { }
const GPUType& _gpuObject;
protected:
const std::weak_ptr<VKBackend> _backend;
};
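// Pattern sketch: backend resources derive from VKObject<T> and are cached on
// the engine-side object via Backend::setGPUObject()/getGPUObject(), so each
// gpu::* resource carries at most one live VK counterpart (VKBuffer::sync is
// the canonical shape of that lookup).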
} } // namespace gpu::vulkan
#define CHECK_VK_ERROR() gpu::vulkan::checkGLErrorDebug(__FUNCTION__)
#endif

VKTexture.cpp

@@ -0,0 +1,302 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#if 0
#include "VKTexture.h"
#include <NumericalConstants.h>
#include "VKTextureTransfer.h"
#include "VKBackend.h"
using namespace gpu;
using namespace gpu::gl;
std::shared_ptr<VKTextureTransferHelper> VKTexture::_textureTransferHelper;
static std::map<uint16, size_t> _textureCountByMips;
static uint16 _currentMaxMipCount { 0 };
// FIXME placeholder for texture memory over-use
#define DEFAULT_MAX_MEMORY_MB 256
const VKenum VKTexture::CUBE_FACE_LAYOUT[6] = {
VK_TEXTURE_CUBE_MAP_POSITIVE_X, VK_TEXTURE_CUBE_MAP_NEGATIVE_X,
VK_TEXTURE_CUBE_MAP_POSITIVE_Y, VK_TEXTURE_CUBE_MAP_NEGATIVE_Y,
VK_TEXTURE_CUBE_MAP_POSITIVE_Z, VK_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
const VKenum VKTexture::WRAP_MODES[Sampler::NUM_WRAP_MODES] = {
VK_REPEAT, // WRAP_REPEAT,
VK_MIRRORED_REPEAT, // WRAP_MIRROR,
VK_CLAMP_TO_EDGE, // WRAP_CLAMP,
VK_CLAMP_TO_BORDER, // WRAP_BORDER,
VK_MIRROR_CLAMP_TO_EDGE_EXT // WRAP_MIRROR_ONCE,
};
const VKFilterMode VKTexture::FILTER_MODES[Sampler::NUM_FILTERS] = {
{ VK_NEAREST, VK_NEAREST }, //FILTER_MIN_MAG_POINT,
{ VK_NEAREST, VK_LINEAR }, //FILTER_MIN_POINT_MAG_LINEAR,
{ VK_LINEAR, VK_NEAREST }, //FILTER_MIN_LINEAR_MAG_POINT,
{ VK_LINEAR, VK_LINEAR }, //FILTER_MIN_MAG_LINEAR,
{ VK_NEAREST_MIPMAP_NEAREST, VK_NEAREST }, //FILTER_MIN_MAG_MIP_POINT,
{ VK_NEAREST_MIPMAP_LINEAR, VK_NEAREST }, //FILTER_MIN_MAG_POINT_MIP_LINEAR,
{ VK_NEAREST_MIPMAP_NEAREST, VK_LINEAR }, //FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT,
{ VK_NEAREST_MIPMAP_LINEAR, VK_LINEAR }, //FILTER_MIN_POINT_MAG_MIP_LINEAR,
{ VK_LINEAR_MIPMAP_NEAREST, VK_NEAREST }, //FILTER_MIN_LINEAR_MAG_MIP_POINT,
{ VK_LINEAR_MIPMAP_LINEAR, VK_NEAREST }, //FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR,
{ VK_LINEAR_MIPMAP_NEAREST, VK_LINEAR }, //FILTER_MIN_MAG_LINEAR_MIP_POINT,
{ VK_LINEAR_MIPMAP_LINEAR, VK_LINEAR }, //FILTER_MIN_MAG_MIP_LINEAR,
{ VK_LINEAR_MIPMAP_LINEAR, VK_LINEAR } //FILTER_ANISOTROPIC,
};
VKenum VKTexture::getGLTextureType(const Texture& texture) {
switch (texture.getType()) {
case Texture::TEX_2D:
return VK_TEXTURE_2D;
break;
case Texture::TEX_CUBE:
return VK_TEXTURE_CUBE_MAP;
break;
default:
qFatal("Unsupported texture type");
}
Q_UNREACHABLE();
return VK_TEXTURE_2D;
}
const std::vector<VKenum>& VKTexture::getFaceTargets(VKenum target) {
static std::vector<VKenum> cubeFaceTargets {
VK_TEXTURE_CUBE_MAP_POSITIVE_X, VK_TEXTURE_CUBE_MAP_NEGATIVE_X,
VK_TEXTURE_CUBE_MAP_POSITIVE_Y, VK_TEXTURE_CUBE_MAP_NEGATIVE_Y,
VK_TEXTURE_CUBE_MAP_POSITIVE_Z, VK_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
static std::vector<VKenum> faceTargets {
VK_TEXTURE_2D
};
switch (target) {
case VK_TEXTURE_2D:
return faceTargets;
case VK_TEXTURE_CUBE_MAP:
return cubeFaceTargets;
default:
Q_UNREACHABLE();
break;
}
Q_UNREACHABLE();
return faceTargets;
}
float VKTexture::getMemoryPressure() {
// Check for an explicit memory limit
auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();
// If no memory limit has been set, use a percentage of the total dedicated memory
if (!availableTextureMemory) {
auto totalGpuMemory = gpu::vk::getDedicatedMemory();
// If no limit has been explicitly set, and the dedicated memory can't be determined,
// just use a fallback fixed value of 256 MB
if (!totalGpuMemory) {
totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
}
// Allow 75% of all available GPU memory to be consumed by textures
// FIXME overly conservative?
availableTextureMemory = (totalGpuMemory >> 2) * 3;
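// Worked example: with 1024 MB of dedicated memory and no explicit limit,
// (1024 >> 2) * 3 = 768 MB is allowed, so 384 MB of resident textures
// reports a pressure of 0.5.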
}
// Return the consumed texture memory divided by the available texture memory.
auto consumedGpuMemory = Context::getTextureGPUMemoryUsage();
return (float)consumedGpuMemory / (float)availableTextureMemory;
}
VKTexture::DownsampleSource::DownsampleSource(const std::weak_ptr<vk::VKBackend>& backend, VKTexture* oldTexture) :
_backend(backend),
_size(oldTexture ? oldTexture->_size : 0),
_texture(oldTexture ? oldTexture->takeOwnership() : 0),
_minMip(oldTexture ? oldTexture->_minMip : 0),
_maxMip(oldTexture ? oldTexture->_maxMip : 0)
{
}
VKTexture::DownsampleSource::~DownsampleSource() {
if (_texture) {
auto backend = _backend.lock();
if (backend) {
backend->releaseTexture(_texture, _size);
}
}
}
VKTexture::VKTexture(const std::weak_ptr<VKBackend>& backend, const gpu::Texture& texture, VKuint id, VKTexture* originalTexture, bool transferrable) :
VKObject(backend, texture, id),
_storageStamp(texture.getStamp()),
_target(getGLTextureType(texture)),
_maxMip(texture.maxMip()),
_minMip(texture.minMip()),
_virtualSize(texture.evalTotalSize()),
_transferrable(transferrable),
_downsampleSource(backend, originalTexture)
{
if (_transferrable) {
uint16 mipCount = usedMipLevels();
_currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
if (!_textureCountByMips.count(mipCount)) {
_textureCountByMips[mipCount] = 1;
} else {
++_textureCountByMips[mipCount];
}
}
Backend::incrementTextureGPUCount();
Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}
// Create the texture and allocate storage
VKTexture::VKTexture(const std::weak_ptr<vk::VKBackend>& backend, const Texture& texture, VKuint id, bool transferrable) :
VKTexture(backend, texture, id, nullptr, transferrable)
{
// FIXME, do during allocation
//Backend::updateTextureGPUMemoryUsage(0, _size);
Backend::setGPUObject(texture, this);
}
// Create the texture and copy from the original higher resolution version
VKTexture::VKTexture(const std::weak_ptr<vk::VKBackend>& backend, const gpu::Texture& texture, VKuint id, VKTexture* originalTexture) :
VKTexture(backend, texture, id, originalTexture, originalTexture->_transferrable)
{
Q_ASSERT(_minMip >= originalTexture->_minMip);
// Set the GPU object last because that implicitly destroys the originalTexture object
Backend::setGPUObject(texture, this);
}
VKTexture::~VKTexture() {
if (_transferrable) {
uint16 mipCount = usedMipLevels();
Q_ASSERT(_textureCountByMips.count(mipCount));
auto& numTexturesForMipCount = _textureCountByMips[mipCount];
--numTexturesForMipCount;
if (0 == numTexturesForMipCount) {
_textureCountByMips.erase(mipCount);
if (mipCount == _currentMaxMipCount) {
_currentMaxMipCount = (_textureCountByMips.empty() ? 0 : _textureCountByMips.rbegin()->first);
}
}
}
if (_id) {
auto backend = _backend.lock();
if (backend) {
backend->releaseTexture(_id, _size);
}
}
Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}
void VKTexture::createTexture() {
withPreservedTexture([&] {
allocateStorage();
(void)CHECK_VK_ERROR();
syncSampler();
(void)CHECK_VK_ERROR();
});
}
void VKTexture::setSize(VKuint size) const {
Backend::updateTextureGPUMemoryUsage(_size, size);
const_cast<VKuint&>(_size) = size;
}
bool VKTexture::isInvalid() const {
return _storageStamp < _gpuObject.getStamp();
}
bool VKTexture::isOutdated() const {
return VKSyncState::Idle == _syncState && _contentStamp < _gpuObject.getDataStamp();
}
bool VKTexture::isOverMaxMemory() const {
// FIXME switch to using the max mip count used from the previous frame
if (usedMipLevels() < _currentMaxMipCount) {
return false;
}
Q_ASSERT(usedMipLevels() == _currentMaxMipCount);
if (getMemoryPressure() < 1.0f) {
return false;
}
return true;
}
bool VKTexture::isReady() const {
// If we have an invalid texture, we're never ready
if (isInvalid()) {
return false;
}
// If we're out of date, but the transfer is in progress, report ready
// as a special case
auto syncState = _syncState.load();
if (isOutdated()) {
return Idle != syncState;
}
if (Idle != syncState) {
return false;
}
return true;
}
// Do any post-transfer operations that might be required on the main context / rendering thread
void VKTexture::postTransfer() {
setSyncState(VKSyncState::Idle);
++_transferCount;
//// The public gltexture becomes available
//_id = _privateTexture;
_downsampleSource.reset();
// At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
switch (_gpuObject.getType()) {
case Texture::TEX_2D:
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i)) {
_gpuObject.notifyMipFaceGPULoaded(i);
}
}
break;
case Texture::TEX_CUBE:
// transfer pixels from each faces
for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
_gpuObject.notifyMipFaceGPULoaded(i, f);
}
}
}
break;
default:
qCWarning(gpu_vk_logging) << __FUNCTION__ << " case for Texture Type " << _gpuObject.getType() << " not supported";
break;
}
}
void VKTexture::initTextureTransferHelper() {
_textureTransferHelper = std::make_shared<VKTextureTransferHelper>();
}
#endif

VKTexture.h

@@ -0,0 +1,203 @@
//
// Created by Bradley Austin Davis on 2016/08/07
// Copyright 2013-2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_vk_VKTexture_h
#define hifi_gpu_vk_VKTexture_h
#include "VKShared.h"
#include "VKTextureTransfer.h"
#include "VKBackend.h"
namespace gpu { namespace vk {
struct VKFilterMode {
vk::Filter minFilter;
vk::Filter magFilter;
};
class VKTexture : public VKObject<Texture, vk::Image> {
public:
static void initTextureTransferHelper();
static std::shared_ptr<VKTextureTransferHelper> _textureTransferHelper;
template <typename VKTextureType>
static VKTextureType* sync(VKBackend& backend, const TexturePointer& texturePointer, bool needTransfer) {
const Texture& texture = *texturePointer;
if (!texture.isDefined()) {
// NO texture definition yet so let's avoid thinking
return nullptr;
}
// If the object hasn't been created, or the object definition is out of date, drop and re-create
VKTextureType* object = Backend::getGPUObject<VKTextureType>(texture);
// Create the texture if need be (force re-creation if the storage stamp changes
// for easier use of immutable storage)
if (!object || object->isInvalid()) {
// This automatically destroys any previous texture
object = new VKTextureType(backend.shared_from_this(), texture, needTransfer);
if (!object->_transferrable) {
object->createTexture();
object->_contentStamp = texture.getDataStamp();
object->postTransfer();
}
}
// Object maybe doesn't need to be transferred after creation
if (!object->_transferrable) {
return object;
}
// If we just did a transfer, return the object after doing post-transfer work
if (VKSyncState::Transferred == object->getSyncState()) {
object->postTransfer();
return object;
}
if (object->isReady()) {
// Do we need to reduce texture memory usage?
if (object->isOverMaxMemory() && texturePointer->incrementMinMip()) {
// WARNING, this code path will essentially `delete this`,
// so no dereferencing of this instance should be done past this point
object = new VKTextureType(backend.shared_from_this(), texture, object);
_textureTransferHelper->transferTexture(texturePointer);
}
} else if (object->isOutdated()) {
// Object might be outdated, if so, start the transfer
// (outdated objects that are already in transfer will have reported 'true' for ready())
_textureTransferHelper->transferTexture(texturePointer);
}
return object;
}
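// State machine summary (per the comments above): invalid -> re-create object;
// Transferred -> postTransfer(); outdated + Idle -> schedule transferTexture();
// ready but over budget -> bump the min mip and re-transfer a downsampled copy.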
template <typename VKTextureType>
static vk::Image getId(VKBackend& backend, const TexturePointer& texture, bool shouldSync) {
if (!texture) {
return 0;
}
VKTextureType* object { nullptr };
if (shouldSync) {
object = sync<VKTextureType>(backend, texture, shouldSync);
} else {
object = Backend::getGPUObject<VKTextureType>(*texture);
}
if (!object) {
return 0;
}
VKuint result = object->_id;
// Don't return textures that are in transfer state
if (shouldSync) {
if ((object->getSyncState() != VKSyncState::Idle) ||
// Don't return transferrable textures that have never completed transfer
(!object->_transferrable || 0 != object->_transferCount)) {
// Will be either 0 or the original texture being downsampled.
result = object->_downsampleSource._texture;
}
}
return result;
}
// Used by derived classes and helpers to ensure the actual VK object exceeds the lifetime of `this`
vk::Image takeOwnership() {
vk::Image result = _id;
const_cast<vk::Image&>(_id) = 0;
return result;
}
~VKTexture();
const Stamp _storageStamp;
const vk::ImageType _target;
const uint16 _maxMip;
const uint16 _minMip;
const Size _virtualSize; // theoretical size as expected
Stamp _contentStamp { 0 };
const bool _transferrable;
Size _transferCount { 0 };
struct DownsampleSource {
using Pointer = std::shared_ptr<DownsampleSource>;
DownsampleSource(const std::weak_ptr<vk::VKBackend>& backend) : _backend(backend), _size(0), _texture(0), _minMip(0), _maxMip(0) {}
DownsampleSource(const std::weak_ptr<vk::VKBackend>& backend, VKTexture* originalTexture);
~DownsampleSource();
void reset() const { const_cast<VKuint&>(_texture) = 0; }
const std::weak_ptr<vk::VKBackend> _backend;
const Size _size { 0 };
const vk::Image _texture { 0 };
const uint16 _minMip { 0 };
const uint16 _maxMip { 0 };
} _downsampleSource;
VKuint size() const { return _size; }
VKSyncState getSyncState() const { return _syncState; }
// Is the storage out of date relative to the gpu texture?
bool isInvalid() const;
// Is the content out of date relative to the gpu texture?
bool isOutdated() const;
// Is the texture in a state where it can be rendered with no work?
bool isReady() const;
// Execute any post-move operations that must occur only on the main thread
void postTransfer();
bool isOverMaxMemory() const;
protected:
static const size_t CUBE_NUM_FACES = 6;
static const VKenum CUBE_FACE_LAYOUT[6];
static const VKFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
static const VKenum WRAP_MODES[Sampler::NUM_WRAP_MODES];
static const std::vector<VKenum>& getFaceTargets(VKenum textureType);
static vk::ImageType getGLTextureType(const Texture& texture);
// Return a floating point value indicating how much of the allowed
// texture memory we are currently consuming. A value of 0 indicates
// no texture memory usage, while a value of 1 indicates all available / allowed memory
// is consumed. A value above 1 indicates that there is a problem.
static float getMemoryPressure();
const VKuint _size { 0 }; // true size as reported by the gl api
std::atomic<VKSyncState> _syncState { VKSyncState::Idle };
VKTexture(const std::weak_ptr<vk::VKBackend>& backend, const Texture& texture, VKuint id, bool transferrable);
VKTexture(const std::weak_ptr<vk::VKBackend>& backend, const Texture& texture, VKuint id, VKTexture* originalTexture);
void setSyncState(VKSyncState syncState) { _syncState = syncState; }
uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }
void createTexture();
virtual void allocateStorage() const = 0;
virtual void updateSize() const = 0;
virtual void transfer() const = 0;
virtual void syncSampler() const = 0;
virtual void generateMips() const = 0;
virtual void withPreservedTexture(std::function<void()> f) const = 0;
protected:
void setSize(VKuint size) const;
private:
VKTexture(const std::weak_ptr<VKBackend>& backend, const gpu::Texture& gpuTexture, VKuint id, VKTexture* originalTexture, bool transferrable);
friend class VKTextureTransferHelper;
friend class VKBackend;
};
} }
#endif

@@ -97,6 +97,10 @@ struct Reflection {
// Needed for procedural code, will map to push constants for Vulkan
LocationMap uniforms;
size_t descriptorCount() const {
return textures.size() + uniformBuffers.size() + resourceBuffers.size();
}
static std::vector<std::string> getNames(const LocationMap& locations);
private:
@@ -173,4 +177,8 @@ inline uint32_t getFragmentId(uint32_t programId) {
return programId & UINT16_MAX;
}
inline uint32_t makeProgramId(uint32_t vertexId, uint32_t fragmentId) {
return (vertexId << 16) | fragmentId;
}
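// e.g. vertexId 0x0002 and fragmentId 0x0005 pack to programId 0x00020005;
// getFragmentId() recovers 0x0005 by masking with UINT16_MAX.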
} // namespace shader

@@ -37,6 +37,20 @@ namespace geometry {
TexCoordVector texCoords;
FaceVector<N> faces;
template <typename T>
std::vector<T> getIndices() const {
size_t count = faces.size() * N;
std::vector<T> indices;
indices.reserve(count);
for (const auto& face : faces) {
for (const auto& index : face) {
indices.push_back((T)index);
}
}
return indices;
}
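// e.g. for a Solid<3> (triangle mesh), getIndices<uint16_t>() flattens the
// per-face vertex-index triplets into one contiguous list suitable for an
// index buffer upload.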
Solid<N>& fitDimension(float newMaxDimension) {
float maxDimension = 0;
for (const auto& vertex : vertices) {

OculusDisplayPlugin.cpp

@@ -72,7 +72,6 @@ void OculusDisplayPlugin::cycleDebugOutput() {
void OculusDisplayPlugin::customizeContext() {
Parent::customizeContext();
_outputFramebuffer.reset(gpu::Framebuffer::create("OculusOutput", gpu::Element::COLOR_SRGBA_32, _renderTargetSize.x, _renderTargetSize.y));
ovrTextureSwapChainDesc desc = { };
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
@@ -126,7 +125,6 @@ void OculusDisplayPlugin::uncustomizeContext() {
ovr_DestroyTextureSwapChain(_session, _textureSwapChain);
_textureSwapChain = nullptr;
_outputFramebuffer.reset();
_customized = false;
Parent::uncustomizeContext();
}

OculusDisplayPlugin.h

@@ -37,7 +37,6 @@ protected:
private:
static const char* NAME;
ovrTextureSwapChain _textureSwapChain;
gpu::FramebufferPointer _outputFramebuffer;
bool _customized { false };
std::atomic_bool _aswActive;

tools/gpu-frame-player/CMakeLists.txt

@@ -9,23 +9,24 @@ if (APPLE)
endif()
setup_hifi_project(Gui Widgets)
# link in the shared libraries
link_hifi_libraries(shared ktx shaders gpu)
if (APPLE)
unset(CMAKE_MACOSX_BUNDLE)
endif()
# link in the shared libraries
link_hifi_libraries(
shared ktx shaders gpu
# vk gpu-vk
gl ${PLATFORM_GL_BACKEND}
)
option(BUILD_FRAME_PLAYER_GL "Use OpenGL backend for frame player" ON)
target_compile_definitions(${TARGET_NAME} PRIVATE USE_GL)
set(OpenGL_GL_PREFERENCE "GLVND")
target_opengl()
#target_vulkan()
if (BUILD_FRAME_PLAYER_GL)
target_compile_definitions(${TARGET_NAME} PRIVATE USE_GL)
set(OpenGL_GL_PREFERENCE "GLVND")
link_hifi_libraries(gl ${PLATFORM_GL_BACKEND})
target_opengl()
else()
link_hifi_libraries(vk gpu-vk)
target_vulkan()
endif()
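# Configuration sketch: select the Vulkan path at configure time with
#   cmake -DBUILD_FRAME_PLAYER_GL=OFF ..
# (the OpenGL backend remains the default since the option is ON).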
package_libraries_for_deployment()

RenderThread.cpp

@@ -8,7 +8,10 @@
#include "RenderThread.h"
#include <QtGui/QWindow>
#ifdef USE_GL
#include <gl/QOpenGLContextWrapper.h>
#endif
void RenderThread::submitFrame(const gpu::FramePointer& frame) {
std::unique_lock<std::mutex> lock(_frameLock);

@@ -227,8 +227,6 @@ def processCommand(line):
# Generate the optimized GLSL output
spirvCrossDialect = dialect
# 310es causes spirv-cross to inject "#extension GL_OES_texture_buffer : require" into the output
if (dialect == '310es'): spirvCrossDialect = '320es'
spirvCrossArgs = [spirvCrossExec, '--output', glslFile, spirvFile, '--version', spirvCrossDialect]
if (dialect == '410'): spirvCrossArgs.append('--no-420pack-extension')
executeSubprocess(spirvCrossArgs)
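# e.g. for dialect '410' the assembled command line is (filenames hypothetical):
#   spirv-cross --output shader.glsl shader.spv --version 410 --no-420pack-extension
# while '310es' is bumped to '320es' above to avoid the injected
# GL_OES_texture_buffer extension directive.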