Breaking up GL agnostic code from 4.1 specific code

parent 67891abf17
commit 3cc08cdcfc

44 changed files with 3685 additions and 3260 deletions
@ -7,18 +7,21 @@
 #
 macro(TARGET_NSIGHT)
   if (WIN32 AND USE_NSIGHT)

     # grab the global CHECKED_FOR_NSIGHT_ONCE property
-    get_property(NSIGHT_CHECKED GLOBAL PROPERTY CHECKED_FOR_NSIGHT_ONCE)
+    get_property(NSIGHT_UNAVAILABLE GLOBAL PROPERTY CHECKED_FOR_NSIGHT_ONCE)

-    if (NOT NSIGHT_CHECKED)
+    if (NOT NSIGHT_UNAVAILABLE)
       # try to find the Nsight package and add it to the build if we find it
       find_package(NSIGHT)

-      # set the global CHECKED_FOR_NSIGHT_ONCE property so that we only debug that we couldn't find it once
-      set_property(GLOBAL PROPERTY CHECKED_FOR_NSIGHT_ONCE TRUE)
+      # Cache the failure to find nsight, so that we don't check over and over
+      if (NOT NSIGHT_FOUND)
+        set_property(GLOBAL PROPERTY CHECKED_FOR_NSIGHT_ONCE TRUE)
+      endif()
     endif ()

     # try to find the Nsight package and add it to the build if we find it
     if (NSIGHT_FOUND)
       include_directories(${NSIGHT_INCLUDE_DIRS})
       add_definitions(-DNSIGHT_FOUND)
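The macro above is a probe-once pattern: the global CHECKED_FOR_NSIGHT_ONCE property now caches only a failed find_package(NSIGHT), so later configure passes skip the lookup. The same idiom in C++, as a minimal sketch (findNsight is a hypothetical stand-in for the probe, and the sketch caches both outcomes for simplicity):

    #include <mutex>

    bool findNsight();   // hypothetical expensive probe, like find_package(NSIGHT)

    bool nsightAvailable() {
        static std::once_flag probed;
        static bool available = false;
        // Probe exactly once and reuse the cached answer afterwards,
        // mirroring the CHECKED_FOR_NSIGHT_ONCE property.
        std::call_once(probed, [] { available = findNsight(); });
        return available;
    }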
@ -16,17 +16,51 @@
#include <functional>
#include <glm/gtc/type_ptr.hpp>

#include "../gl41/GLBackend.h"

#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
#endif

#include <GPUIdent.h>
#include <NumericalConstants.h>
#include "GLBackendShared.h"
#include <gl/QOpenGLContextWrapper.h>
#include <QtCore/QProcessEnvironment>

#include "GLTexture.h"
#include "GLShader.h"
using namespace gpu;
using namespace gpu::gl;

static const QString DEBUG_FLAG("HIFI_ENABLE_OPENGL_45");
static bool enableOpenGL45 = QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);

Backend* GLBackend::createBackend() {
    auto version = QOpenGLContextWrapper::currentContextVersion();

    // FIXME provide a mechanism to override the backend for testing
    // Where the gpuContext is initialized and where the TRUE Backend is created and assigned
#if 0
    GLBackend* result;
    if (enableOpenGL45 && version >= 0x0405) {
        result = new gpu::gl45::GLBackend;
    } else {
        result = new gpu::gl41::GLBackend;
    }
#else
    GLBackend* result = new gpu::gl41::GLBackend;
#endif
    result->initInput();
    result->initTransform();
    gl::GLTexture::initTextureTransferHelper();
    return result;
}

bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindings) {
    return GLShader::makeProgram(shader, slotBindings);
}

GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
{
    (&::gpu::gl::GLBackend::do_draw),
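createBackend() gates the GL 4.5 path on both the live context version and an opt-in environment variable. A minimal sketch of that selection logic in isolation (a hypothetical harness; 0x0405 assumes the major/minor byte packing that currentContextVersion() appears to use):

    #include <QtCore/QProcessEnvironment>

    // Hypothetical stand-alone version of the gate above.
    static bool wantGL45(int contextVersion) {
        static const QString DEBUG_FLAG("HIFI_ENABLE_OPENGL_45");
        const bool optedIn = QProcessEnvironment::systemEnvironment().contains(DEBUG_FLAG);
        return optedIn && contextVersion >= 0x0405;   // 4.5 or newer
    }

Setting the variable before context creation (for example with qputenv("HIFI_ENABLE_OPENGL_45", "1")) would flip the switch, although the #if 0 above shows the 4.5 branch is compiled out in this commit.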
@ -95,16 +129,13 @@ void GLBackend::init() {
    std::call_once(once, [] {

        TEXTURE_ID_RESOLVER = [](const Texture& texture)->uint32 {
-           auto object = Backend::getGPUObject<GLBackend::GLTexture>(texture);
+           auto object = Backend::getGPUObject<GLTexture>(texture);
            if (!object) {
                return 0;
            }

-           if (object->getSyncState() != GLTexture::Idle) {
-               if (object->_downsampleSource) {
-                   return object->_downsampleSource->_texture;
-               }
-               return 0;
+           if (object->getSyncState() != GLSyncState::Idle) {
+               return object->_downsampleSource._texture;
            }
            return object->_texture;
        };
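The resolver hands back a usable GL texture name even while a texture is mid-transfer, falling back to the downsample source. A standalone sketch of that decision ladder (names mirror the diff, but the types are simplified stand-ins):

    #include <cstdint>

    enum class GLSyncState { Idle, Pending, Transferred };

    struct FakeGLTexture {                      // simplified stand-in type
        uint32_t _texture { 0 };
        GLSyncState syncState { GLSyncState::Idle };
        struct { uint32_t _texture { 0 }; } _downsampleSource;
    };

    uint32_t resolveTextureId(const FakeGLTexture* object) {
        if (!object) {
            return 0;                           // no GPU object yet
        }
        if (object->syncState != GLSyncState::Idle) {
            // still transferring: render the lower-res stand-in (0 if none)
            return object->_downsampleSource._texture;
        }
        return object->_texture;                // fully synced
    }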
@ -148,61 +179,11 @@ void GLBackend::init() {
    });
}

Context::Size GLBackend::getDedicatedMemory() {
    static Context::Size dedicatedMemory { 0 };
    static std::once_flag once;
    std::call_once(once, [&] {
#ifdef Q_OS_WIN
        if (!dedicatedMemory && wglGetGPUIDsAMD && wglGetGPUInfoAMD) {
            UINT maxCount = wglGetGPUIDsAMD(0, 0);
            std::vector<UINT> ids;
            ids.resize(maxCount);
            wglGetGPUIDsAMD(maxCount, &ids[0]);
            GLuint memTotal;
            wglGetGPUInfoAMD(ids[0], WGL_GPU_RAM_AMD, GL_UNSIGNED_INT, sizeof(GLuint), &memTotal);
            dedicatedMemory = MB_TO_BYTES(memTotal);
        }
#endif

        if (!dedicatedMemory) {
            GLint atiGpuMemory[4];
            // not really total memory, but close enough if called early enough in the application lifecycle
            glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, atiGpuMemory);
            if (GL_NO_ERROR == glGetError()) {
                dedicatedMemory = KB_TO_BYTES(atiGpuMemory[0]);
            }
        }

        if (!dedicatedMemory) {
            GLint nvGpuMemory { 0 };
            glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &nvGpuMemory);
            if (GL_NO_ERROR == glGetError()) {
                dedicatedMemory = KB_TO_BYTES(nvGpuMemory);
            }
        }

        if (!dedicatedMemory) {
            auto gpuIdent = GPUIdent::getInstance();
            if (gpuIdent && gpuIdent->isValid()) {
                dedicatedMemory = MB_TO_BYTES(gpuIdent->getMemory());
            }
        }
    });

    return dedicatedMemory;
}

Backend* GLBackend::createBackend() {
    return new GLBackend();
}

GLBackend::GLBackend() {
    glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &_uboAlignment);
    initInput();
    initTransform();
    initTextureTransferHelper();
}

GLBackend::~GLBackend() {
    resetStages();
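getDedicatedMemory() tries progressively less specific probes until one succeeds. The core of the cascade, compressed into a sketch: GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX and GL_TEXTURE_FREE_MEMORY_ATI come from the NVX_gpu_memory_info and ATI_meminfo extensions, and glGetError() is the only signal that the driver understood the query.

    // Sketch of the fallback ladder; assumes a current GL context and the
    // extension enums from the loader headers. Returns kilobytes, 0 on failure.
    size_t queryDedicatedMemoryKB() {
        GLint kb[4] { 0, 0, 0, 0 };
        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, kb);  // NVIDIA
        if (glGetError() == GL_NO_ERROR) {
            return kb[0];
        }
        glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, kb);               // AMD (free, not total)
        if (glGetError() == GL_NO_ERROR) {
            return kb[0];
        }
        return 0;   // fall through to vendor APIs or GPUIdent, as above
    }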
@ -260,7 +241,7 @@ void GLBackend::renderPassTransfer(Batch& batch) {

    { // Sync the transform buffers
        PROFILE_RANGE("syncGPUTransform");
-       _transform.transfer(batch);
+       transferTransformState(batch);
    }

    _inRenderTransferPass = false;
@ -355,164 +336,6 @@ void GLBackend::setupStereoSide(int side) {
    _transform.bindCurrentCamera(side);
}

void GLBackend::do_draw(Batch& batch, size_t paramOffset) {
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
    GLenum mode = _primitiveToGLmode[primitiveType];
    uint32 numVertices = batch._params[paramOffset + 1]._uint;
    uint32 startVertex = batch._params[paramOffset + 0]._uint;

    if (isStereo()) {
        setupStereoSide(0);
        glDrawArrays(mode, startVertex, numVertices);
        setupStereoSide(1);
        glDrawArrays(mode, startVertex, numVertices);

        _stats._DSNumTriangles += 2 * numVertices / 3;
        _stats._DSNumDrawcalls += 2;

    } else {
        glDrawArrays(mode, startVertex, numVertices);
        _stats._DSNumTriangles += numVertices / 3;
        _stats._DSNumDrawcalls++;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void GLBackend::do_drawIndexed(Batch& batch, size_t paramOffset) {
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
    GLenum mode = _primitiveToGLmode[primitiveType];
    uint32 numIndices = batch._params[paramOffset + 1]._uint;
    uint32 startIndex = batch._params[paramOffset + 0]._uint;

    GLenum glType = _elementTypeToGLType[_input._indexBufferType];

    auto typeByteSize = TYPE_SIZE[_input._indexBufferType];
    GLvoid* indexBufferByteOffset = reinterpret_cast<GLvoid*>(startIndex * typeByteSize + _input._indexBufferOffset);

    if (isStereo()) {
        setupStereoSide(0);
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);
        setupStereoSide(1);
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);

        _stats._DSNumTriangles += 2 * numIndices / 3;
        _stats._DSNumDrawcalls += 2;
    } else {
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);
        _stats._DSNumTriangles += numIndices / 3;
        _stats._DSNumDrawcalls++;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void GLBackend::do_drawInstanced(Batch& batch, size_t paramOffset) {
    GLint numInstances = batch._params[paramOffset + 4]._uint;
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 3]._uint;
    GLenum mode = _primitiveToGLmode[primitiveType];
    uint32 numVertices = batch._params[paramOffset + 2]._uint;
    uint32 startVertex = batch._params[paramOffset + 1]._uint;

    if (isStereo()) {
        GLint trueNumInstances = 2 * numInstances;

        setupStereoSide(0);
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);
        setupStereoSide(1);
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);

        _stats._DSNumTriangles += (trueNumInstances * numVertices) / 3;
        _stats._DSNumDrawcalls += trueNumInstances;
    } else {
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);
        _stats._DSNumTriangles += (numInstances * numVertices) / 3;
        _stats._DSNumDrawcalls += numInstances;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void glbackend_glDrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount, GLint basevertex, GLuint baseinstance) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    glDrawElementsInstancedBaseVertexBaseInstance(mode, count, type, indices, primcount, basevertex, baseinstance);
#else
    glDrawElementsInstanced(mode, count, type, indices, primcount);
#endif
}
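The wrapper picks the GL 4.3 entry point at compile time via GPU_INPUT_PROFILE; on the 4.1 path the basevertex and baseinstance arguments are silently dropped. A runtime-detection variant is also possible; a sketch, assuming GLEW is the loader (GLEW_ARB_base_instance is a GLEW-specific flag):

    void drawElementsInstancedBVBI(GLenum mode, GLsizei count, GLenum type,
                                   const GLvoid* indices, GLsizei primcount,
                                   GLint basevertex, GLuint baseinstance) {
        if (GLEW_ARB_base_instance) {
            glDrawElementsInstancedBaseVertexBaseInstance(
                mode, count, type, indices, primcount, basevertex, baseinstance);
        } else {
            // Fallback: both offsets are ignored, matching the #else branch above.
            glDrawElementsInstanced(mode, count, type, indices, primcount);
        }
    }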
void GLBackend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
    GLint numInstances = batch._params[paramOffset + 4]._uint;
    GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 3]._uint];
    uint32 numIndices = batch._params[paramOffset + 2]._uint;
    uint32 startIndex = batch._params[paramOffset + 1]._uint;
    // FIXME glDrawElementsInstancedBaseVertexBaseInstance is only available in GL 4.3
    // and higher, so currently we ignore this field
    uint32 startInstance = batch._params[paramOffset + 0]._uint;
    GLenum glType = _elementTypeToGLType[_input._indexBufferType];

    auto typeByteSize = TYPE_SIZE[_input._indexBufferType];
    GLvoid* indexBufferByteOffset = reinterpret_cast<GLvoid*>(startIndex * typeByteSize + _input._indexBufferOffset);

    if (isStereo()) {
        GLint trueNumInstances = 2 * numInstances;

        setupStereoSide(0);
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);
        setupStereoSide(1);
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);

        _stats._DSNumTriangles += (trueNumInstances * numIndices) / 3;
        _stats._DSNumDrawcalls += trueNumInstances;
    } else {
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);
        _stats._DSNumTriangles += (numInstances * numIndices) / 3;
        _stats._DSNumDrawcalls += numInstances;
    }

    _stats._DSNumAPIDrawcalls++;

    (void)CHECK_GL_ERROR();
}

void GLBackend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    uint commandCount = batch._params[paramOffset + 0]._uint;
    GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 1]._uint];

    glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
    _stats._DSNumDrawcalls += commandCount;
    _stats._DSNumAPIDrawcalls++;
#else
    // FIXME implement the slow path
#endif
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    uint commandCount = batch._params[paramOffset + 0]._uint;
    GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 1]._uint];
    GLenum indexType = _elementTypeToGLType[_input._indexBufferType];

    glMultiDrawElementsIndirect(mode, indexType, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
    _stats._DSNumDrawcalls += commandCount;
    _stats._DSNumAPIDrawcalls++;
#else
    // FIXME implement the slow path
#endif
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_resetStages(Batch& batch, size_t paramOffset) {
    resetStages();
}

@ -543,41 +366,34 @@ void GLBackend::resetStages() {
    (void) CHECK_GL_ERROR();
}

void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
    auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
    nvtxRangePush(name.c_str());
#endif
}

void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
    nvtxRangePop();
#endif
}
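push/pop pairs like these are easy to unbalance when a batch aborts early; a scope guard keeps them paired automatically. A minimal sketch (not part of the diff) over the same nvToolsExt calls:

    #if defined(NSIGHT_FOUND)
    class NvtxScope {
    public:
        explicit NvtxScope(const char* name) { nvtxRangePush(name); }
        ~NvtxScope() { nvtxRangePop(); }        // popped on every exit path
        NvtxScope(const NvtxScope&) = delete;
        NvtxScope& operator=(const NvtxScope&) = delete;
    };
    #endif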
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API

#define ADD_COMMAND_GL(call) _commands.push_back(COMMAND_##call); _commandOffsets.push_back(_params.size());

// As long as we don't use several versions of shaders we can avoid this more complex code path
// #define GET_UNIFORM_LOCATION(shaderUniformLoc) _pipeline._programShader->getUniformLocation(shaderUniformLoc, isStereo());
#define GET_UNIFORM_LOCATION(shaderUniformLoc) shaderUniformLoc

void Batch::_glActiveBindTexture(GLenum unit, GLenum target, GLuint texture) {
    // clean the cache on the texture unit we are going to use so the next call to setResourceTexture() at the same slot works fine
    setResourceTexture(unit - GL_TEXTURE0, nullptr);

    ADD_COMMAND_GL(glActiveBindTexture);
    _params.push_back(texture);
    _params.push_back(target);
    _params.push_back(unit);
}
void GLBackend::do_glActiveBindTexture(Batch& batch, size_t paramOffset) {
    glActiveTexture(batch._params[paramOffset + 2]._uint);
    glBindTexture(
        GET_UNIFORM_LOCATION(batch._params[paramOffset + 1]._uint),
        batch._params[paramOffset + 0]._uint);

    (void) CHECK_GL_ERROR();
}

void Batch::_glUniform1i(GLint location, GLint v0) {
    if (location < 0) {
        return;
    }
    ADD_COMMAND_GL(glUniform1i);
    _params.push_back(v0);
    _params.push_back(location);
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_glUniform1i(Batch& batch, size_t paramOffset) {

@ -591,17 +407,9 @@ void GLBackend::do_glUniform1i(Batch& batch, size_t paramOffset) {
    glUniform1f(
        GET_UNIFORM_LOCATION(batch._params[paramOffset + 1]._int),
        batch._params[paramOffset + 0]._int);
-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glUniform1f(GLint location, GLfloat v0) {
    if (location < 0) {
        return;
    }
    ADD_COMMAND_GL(glUniform1f);
    _params.push_back(v0);
    _params.push_back(location);
}
void GLBackend::do_glUniform1f(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that
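Note the recording convention used throughout these pairs: ADD_COMMAND_GL remembers where the parameters start, then the arguments are pushed in reverse, so paramOffset + 0 is always the final GL argument and the uniform location lands at the highest index. Replay then reads them back in call order, as the _glUniform2f pair below shows. A toy, self-contained illustration of the layout (values are illustrative only):

    #include <cstdio>
    #include <vector>

    // Toy replay of the ADD_COMMAND_GL parameter layout.
    int main() {
        std::vector<float> params;
        float loc = 7, x = 1.5f, y = 2.5f;
        params.push_back(y);      // v1 pushed first (last GL argument)...
        params.push_back(x);      // ...then v0...
        params.push_back(loc);    // ...location last (highest index)
        size_t offset = 0;
        // Replay reads in call order: location, v0, v1.
        std::printf("glUniform2f(%d, %g, %g)\n",
                    (int)params[offset + 2], params[offset + 1], params[offset + 0]);
    }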
@ -613,15 +421,7 @@ void GLBackend::do_glUniform1f(Batch& batch, size_t paramOffset) {
    glUniform1f(
        GET_UNIFORM_LOCATION(batch._params[paramOffset + 1]._int),
        batch._params[paramOffset + 0]._float);
    (void) CHECK_GL_ERROR();
}

void Batch::_glUniform2f(GLint location, GLfloat v0, GLfloat v1) {
    ADD_COMMAND_GL(glUniform2f);

    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_glUniform2f(Batch& batch, size_t paramOffset) {

@ -635,16 +435,7 @@ void GLBackend::do_glUniform2f(Batch& batch, size_t paramOffset) {
        GET_UNIFORM_LOCATION(batch._params[paramOffset + 2]._int),
        batch._params[paramOffset + 1]._float,
        batch._params[paramOffset + 0]._float);
    (void) CHECK_GL_ERROR();
}

void Batch::_glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2) {
    ADD_COMMAND_GL(glUniform3f);

    _params.push_back(v2);
    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_glUniform3f(Batch& batch, size_t paramOffset) {

@ -659,21 +450,9 @@ void GLBackend::do_glUniform3f(Batch& batch, size_t paramOffset) {
        batch._params[paramOffset + 2]._float,
        batch._params[paramOffset + 1]._float,
        batch._params[paramOffset + 0]._float);
-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) {
    ADD_COMMAND_GL(glUniform4f);

    _params.push_back(v3);
    _params.push_back(v2);
    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
}

void GLBackend::do_glUniform4f(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that

@ -690,14 +469,6 @@ void GLBackend::do_glUniform4f(Batch& batch, size_t paramOffset) {
    (void)CHECK_GL_ERROR();
}

void Batch::_glUniform3fv(GLint location, GLsizei count, const GLfloat* value) {
    ADD_COMMAND_GL(glUniform3fv);

    const int VEC3_SIZE = 3 * sizeof(float);
    _params.push_back(cacheData(count * VEC3_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}
void GLBackend::do_glUniform3fv(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that

@ -710,18 +481,9 @@ void GLBackend::do_glUniform3fv(Batch& batch, size_t paramOffset) {
        batch._params[paramOffset + 1]._uint,
        (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));

-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glUniform4fv(GLint location, GLsizei count, const GLfloat* value) {
    ADD_COMMAND_GL(glUniform4fv);

    const int VEC4_SIZE = 4 * sizeof(float);
    _params.push_back(cacheData(count * VEC4_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}
void GLBackend::do_glUniform4fv(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that

@ -729,23 +491,15 @@ void GLBackend::do_glUniform4fv(Batch& batch, size_t paramOffset) {
        return;
    }
    updatePipeline();

    GLint location = GET_UNIFORM_LOCATION(batch._params[paramOffset + 2]._int);
    GLsizei count = batch._params[paramOffset + 1]._uint;
    const GLfloat* value = (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint);
    glUniform4fv(location, count, value);

-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glUniform4iv(GLint location, GLsizei count, const GLint* value) {
    ADD_COMMAND_GL(glUniform4iv);

    const int VEC4_SIZE = 4 * sizeof(int);
    _params.push_back(cacheData(count * VEC4_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}
void GLBackend::do_glUniform4iv(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that

@ -758,18 +512,9 @@ void GLBackend::do_glUniform4iv(Batch& batch, size_t paramOffset) {
        batch._params[paramOffset + 1]._uint,
        (const GLint*)batch.editData(batch._params[paramOffset + 0]._uint));

-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) {
    ADD_COMMAND_GL(glUniformMatrix4fv);

    const int MATRIX4_SIZE = 16 * sizeof(float);
    _params.push_back(cacheData(count * MATRIX4_SIZE, value));
    _params.push_back(transpose);
    _params.push_back(count);
    _params.push_back(location);
}
void GLBackend::do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) {
    if (_pipeline._program == 0) {
        // We should call updatePipeline() to bind the program but we are not doing that

@ -783,42 +528,20 @@ void GLBackend::do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) {
        batch._params[paramOffset + 2]._uint,
        batch._params[paramOffset + 1]._uint,
        (const GLfloat*)batch.editData(batch._params[paramOffset + 0]._uint));
-   (void) CHECK_GL_ERROR();
+   (void)CHECK_GL_ERROR();
}

void Batch::_glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha) {
    ADD_COMMAND_GL(glColor4f);

    _params.push_back(alpha);
    _params.push_back(blue);
    _params.push_back(green);
    _params.push_back(red);
}
void GLBackend::do_glColor4f(Batch& batch, size_t paramOffset) {

    glm::vec4 newColor(
        batch._params[paramOffset + 3]._float,
        batch._params[paramOffset + 2]._float,
        batch._params[paramOffset + 1]._float,
-       batch._params[paramOffset + 0]._float);
+       batch._params[paramOffset + 0]._float);

    if (_input._colorAttribute != newColor) {
        _input._colorAttribute = newColor;
        glVertexAttrib4fv(gpu::Stream::COLOR, &_input._colorAttribute.r);
    }
    (void) CHECK_GL_ERROR();
}

void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
    auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
    nvtxRangePush(name.c_str());
#endif
}

void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
    nvtxRangePop();
#endif
-   (void)CHECK_GL_ERROR();
}
@ -8,8 +8,8 @@
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
-#ifndef hifi_gpu_GLBackend_h
-#define hifi_gpu_GLBackend_h
+#ifndef hifi_gpu_gl_GLBackend_h
+#define hifi_gpu_gl_GLBackend_h

#include <assert.h>
#include <functional>
@ -26,325 +26,34 @@
#include <gpu/Forward.h>
#include <gpu/Context.h>

#define GPU_CORE 1
#define GPU_LEGACY 0
#define GPU_CORE_41 410
#define GPU_CORE_43 430
#define GPU_CORE_MINIMUM GPU_CORE_41
#define GPU_FEATURE_PROFILE GPU_CORE

#if defined(__APPLE__)
#define GPU_INPUT_PROFILE GPU_CORE_41
#else
#define GPU_INPUT_PROFILE GPU_CORE_43
#endif

#if (GPU_INPUT_PROFILE == GPU_CORE_43)
// Deactivate SSBO for now, we've run into some issues
// on GL 4.3 capable GPUs not behaving as expected
//#define GPU_SSBO_DRAW_CALL_INFO
#endif
#include "GLShared.h"

namespace gpu { namespace gl {

class GLTextureTransferHelper;

class GLBackend : public Backend {

    // Context Backend static interface required
    friend class gpu::Context;
    static void init();
    static Backend* createBackend();
-   static bool makeProgram(Shader& shader, const Shader::BindingSet& bindings);
+   static bool makeProgram(Shader& shader, const Shader::BindingSet& slotBindings);

protected:
-   explicit GLBackend(bool syncCache);
    GLBackend();
public:
    static Context::Size getDedicatedMemory();
-   ~GLBackend();
+   virtual ~GLBackend();

-   virtual void render(Batch& batch);
+   void render(Batch& batch) final;

    // This call synchronizes the full Backend cache with the current GLState.
    // This is only intended to be used when mixing raw gl calls with the gpu api usage in order to sync
    // the gpu::Backend state with the true gl state, which has probably been messed up by these ugly naked gl calls.
    // Let's try to avoid doing that as much as possible!
-   virtual void syncCache();
+   void syncCache() final;

    // This is the ugly "download the pixels to sysmem for taking a snapshot"
    // Just avoid using it, it's ugly and will break performance
    virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage);

    static void checkGLStackStable(std::function<void()> f);

    class GLBuffer : public GPUObject {
    public:
        const GLuint _buffer;
        const GLuint _size;
        const Stamp _stamp;

        GLBuffer(const Buffer& buffer, GLBuffer* original = nullptr);
        ~GLBuffer();

        virtual void transfer();

    private:
        bool getNextTransferBlock(GLintptr& outOffset, GLsizeiptr& outSize, size_t& currentPage) const;

        // The owning buffer
        const Buffer& _gpuBuffer;
    };

    static GLBuffer* syncGPUObject(const Buffer& buffer);
    static GLuint getBufferID(const Buffer& buffer);

    class GLTexture : public GPUObject {
    public:
        // The public gl texture object
        GLuint _texture{ 0 };

        const Stamp _storageStamp;
        Stamp _contentStamp { 0 };
        const GLenum _target;
        const uint16 _maxMip;
        const uint16 _minMip;
        const bool _transferrable;

        struct DownsampleSource {
            using Pointer = std::shared_ptr<DownsampleSource>;
            DownsampleSource(GLTexture& oldTexture);
            ~DownsampleSource();
            const GLuint _texture;
            const uint16 _minMip;
            const uint16 _maxMip;
        };

        DownsampleSource::Pointer _downsampleSource;

        GLTexture(bool transferrable, const gpu::Texture& gpuTexture);
        GLTexture(GLTexture& originalTexture, const gpu::Texture& gpuTexture);
        ~GLTexture();

        // Return a floating point value indicating how much of the allowed
        // texture memory we are currently consuming. A value of 0 indicates
        // no texture memory usage, while a value of 1 indicates all available / allowed memory
        // is consumed. A value above 1 indicates that there is a problem.
        static float getMemoryPressure();

        void withPreservedTexture(std::function<void()> f);

        void createTexture();
        void allocateStorage();

        GLuint size() const { return _size; }
        GLuint virtualSize() const { return _virtualSize; }

        void updateSize();

        enum SyncState {
            // The texture is currently undergoing no processing, although its content
            // may be out of date, or its storage may be invalid relative to the
            // owning GPU texture
            Idle,
            // The texture has been queued for transfer to the GPU
            Pending,
            // The texture has been transferred to the GPU, but is awaiting
            // any post transfer operations that may need to occur on the
            // primary rendering thread
            Transferred,
        };

        void setSyncState(SyncState syncState) { _syncState = syncState; }
        SyncState getSyncState() const { return _syncState; }

        // Is the storage out of date relative to the gpu texture?
        bool isInvalid() const;

        // Is the content out of date relative to the gpu texture?
        bool isOutdated() const;

        // Is the texture in a state where it can be rendered with no work?
        bool isReady() const;

        // Is this texture pushing us over the memory limit?
        bool isOverMaxMemory() const;

        // Move the image bits from the CPU to the GPU
        void transfer() const;

        // Execute any post-move operations that must occur only on the main thread
        void postTransfer();

        uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }

        static const size_t CUBE_NUM_FACES = 6;
        static const GLenum CUBE_FACE_LAYOUT[6];

    private:
        friend class GLTextureTransferHelper;

        GLTexture(bool transferrable, const gpu::Texture& gpuTexture, bool init);
        // at creation the true texture is created in GL
        // it becomes public only when ready.
        GLuint _privateTexture{ 0 };

        const std::vector<GLenum>& getFaceTargets() const;

        void setSize(GLuint size);

        const GLuint _virtualSize; // theoretical size as expected
        GLuint _size { 0 }; // true size as reported by the gl api

        void transferMip(uint16_t mipLevel, uint8_t face = 0) const;

        // The owning texture
        const Texture& _gpuTexture;
        std::atomic<SyncState> _syncState { SyncState::Idle };
    };
    static GLTexture* syncGPUObject(const TexturePointer& texture, bool needTransfer = true);
    static GLuint getTextureID(const TexturePointer& texture, bool sync = true);

    // very specific for now
    static void syncSampler(const Sampler& sampler, Texture::Type type, const GLTexture* object);

    class GLShader : public GPUObject {
    public:
        enum Version {
            Mono = 0,

            NumVersions
        };

        struct ShaderObject {
            GLuint glshader{ 0 };
            GLuint glprogram{ 0 };
            GLint transformCameraSlot{ -1 };
            GLint transformObjectSlot{ -1 };
        };

        using ShaderObjects = std::array< ShaderObject, NumVersions >;

        using UniformMapping = std::map<GLint, GLint>;
        using UniformMappingVersions = std::vector<UniformMapping>;

        GLShader();
        ~GLShader();

        ShaderObjects _shaderObjects;
        UniformMappingVersions _uniformMappings;

        GLuint getProgram(Version version = Mono) const {
            return _shaderObjects[version].glprogram;
        }

        GLint getUniformLocation(GLint srcLoc, Version version = Mono) {
            // THIS will be used in the future PR as we grow the number of versions
            // return _uniformMappings[version][srcLoc];
            return srcLoc;
        }

    };
    static GLShader* syncGPUObject(const Shader& shader);

    class GLState : public GPUObject {
    public:
        class Command {
        public:
            virtual void run(GLBackend* backend) = 0;
            Command() {}
            virtual ~Command() {};
        };

        template <class T> class Command1 : public Command {
        public:
            typedef void (GLBackend::*GLFunction)(T);
            void run(GLBackend* backend) { (backend->*(_func))(_param); }
            Command1(GLFunction func, T param) : _func(func), _param(param) {};
            GLFunction _func;
            T _param;
        };
        template <class T, class U> class Command2 : public Command {
        public:
            typedef void (GLBackend::*GLFunction)(T, U);
            void run(GLBackend* backend) { (backend->*(_func))(_param0, _param1); }
            Command2(GLFunction func, T param0, U param1) : _func(func), _param0(param0), _param1(param1) {};
            GLFunction _func;
            T _param0;
            U _param1;
        };

        template <class T, class U, class V> class Command3 : public Command {
        public:
            typedef void (GLBackend::*GLFunction)(T, U, V);
            void run(GLBackend* backend) { (backend->*(_func))(_param0, _param1, _param2); }
            Command3(GLFunction func, T param0, U param1, V param2) : _func(func), _param0(param0), _param1(param1), _param2(param2) {};
            GLFunction _func;
            T _param0;
            U _param1;
            V _param2;
        };

        typedef std::shared_ptr< Command > CommandPointer;
        typedef std::vector< CommandPointer > Commands;

        Commands _commands;
        Stamp _stamp;
        State::Signature _signature;

        GLState();
        ~GLState();

        // The state commands to reset to default,
        static const Commands _resetStateCommands;

        friend class GLBackend;
    };
    static GLState* syncGPUObject(const State& state);

    class GLPipeline : public GPUObject {
    public:
        GLShader* _program = 0;
        GLState* _state = 0;

        GLPipeline();
        ~GLPipeline();
    };
    static GLPipeline* syncGPUObject(const Pipeline& pipeline);

    class GLFramebuffer : public GPUObject {
    public:
        GLuint _fbo = 0;
        std::vector<GLenum> _colorBuffers;
        Stamp _depthStamp { 0 };
        std::vector<Stamp> _colorStamps;

        GLFramebuffer();
        ~GLFramebuffer();
    };
    static GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer);
    static GLuint getFramebufferID(const FramebufferPointer& framebuffer);

    class GLQuery : public GPUObject {
    public:
        GLuint _qo = 0;
        GLuint64 _result = 0;

        GLQuery();
        ~GLQuery();
    };
    static GLQuery* syncGPUObject(const Query& query);
    static GLuint getQueryID(const QueryPointer& query);
    virtual void downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) final;

    static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
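Command1/2/3 capture a GLBackend member-function pointer plus its arguments so a GLState can replay state setters later. A toy, self-contained replica of the pattern (illustration only; ToyBackend stands in for GLBackend):

    #include <memory>
    #include <vector>

    struct ToyBackend {
        void setFillMode(int mode) { /* glPolygonMode(...) would go here */ }
    };

    struct ToyCommand {
        virtual ~ToyCommand() = default;
        virtual void run(ToyBackend* backend) = 0;
    };

    template <class T> struct ToyCommand1 : ToyCommand {
        typedef void (ToyBackend::*Function)(T);
        ToyCommand1(Function func, T param) : _func(func), _param(param) {}
        void run(ToyBackend* backend) override { (backend->*_func)(_param); }
        Function _func;
        T _param;
    };

    // Usage: a replayable list of captured state setters.
    void replay(ToyBackend* backend) {
        std::vector<std::shared_ptr<ToyCommand>> commands {
            std::make_shared<ToyCommand1<int>>(&ToyBackend::setFillMode, 0),
        };
        for (auto& command : commands) {
            command->run(backend);
        }
    }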
@ -362,70 +71,131 @@ public:
    static const int MAX_NUM_RESOURCE_TEXTURES = 16;
    size_t getMaxNumResourceTextures() const { return MAX_NUM_RESOURCE_TEXTURES; }

    // Draw Stage
    virtual void do_draw(Batch& batch, size_t paramOffset) = 0;
    virtual void do_drawIndexed(Batch& batch, size_t paramOffset) = 0;
    virtual void do_drawInstanced(Batch& batch, size_t paramOffset) = 0;
    virtual void do_drawIndexedInstanced(Batch& batch, size_t paramOffset) = 0;
    virtual void do_multiDrawIndirect(Batch& batch, size_t paramOffset) = 0;
    virtual void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) = 0;

    // Input Stage
    virtual void do_setInputFormat(Batch& batch, size_t paramOffset) final;
    virtual void do_setInputBuffer(Batch& batch, size_t paramOffset) final;
    virtual void do_setIndexBuffer(Batch& batch, size_t paramOffset) final;
    virtual void do_setIndirectBuffer(Batch& batch, size_t paramOffset) final;
    virtual void do_generateTextureMips(Batch& batch, size_t paramOffset) final;

    // Transform Stage
    virtual void do_setModelTransform(Batch& batch, size_t paramOffset) final;
    virtual void do_setViewTransform(Batch& batch, size_t paramOffset) final;
    virtual void do_setProjectionTransform(Batch& batch, size_t paramOffset) final;
    virtual void do_setViewportTransform(Batch& batch, size_t paramOffset) final;
    virtual void do_setDepthRangeTransform(Batch& batch, size_t paramOffset) final;

    // Uniform Stage
    virtual void do_setUniformBuffer(Batch& batch, size_t paramOffset) final;

    // Resource Stage
    virtual void do_setResourceTexture(Batch& batch, size_t paramOffset) final;

    // Pipeline Stage
    virtual void do_setPipeline(Batch& batch, size_t paramOffset) final;

    // Output stage
    virtual void do_setFramebuffer(Batch& batch, size_t paramOffset) final;
    virtual void do_clearFramebuffer(Batch& batch, size_t paramOffset) final;
    virtual void do_blit(Batch& batch, size_t paramOffset) = 0;

    // Query section
    virtual void do_beginQuery(Batch& batch, size_t paramOffset) final;
    virtual void do_endQuery(Batch& batch, size_t paramOffset) final;
    virtual void do_getQuery(Batch& batch, size_t paramOffset) final;

    // Reset stages
    virtual void do_resetStages(Batch& batch, size_t paramOffset) final;

    virtual void do_runLambda(Batch& batch, size_t paramOffset) final;

    virtual void do_startNamedCall(Batch& batch, size_t paramOffset) final;
    virtual void do_stopNamedCall(Batch& batch, size_t paramOffset) final;

    virtual void do_pushProfileRange(Batch& batch, size_t paramOffset) final;
    virtual void do_popProfileRange(Batch& batch, size_t paramOffset) final;

    // TODO: As long as we have gl calls explicitly issued from interface
    // code, we need to be able to record and batch these calls. The long
    // term strategy is to get rid of any GL calls in favor of the HIFI GPU API
    virtual void do_glActiveBindTexture(Batch& batch, size_t paramOffset) final;

    virtual void do_glUniform1i(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform1f(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform2f(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform3f(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4f(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform3fv(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4fv(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniform4iv(Batch& batch, size_t paramOffset) final;
    virtual void do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) final;

    virtual void do_glColor4f(Batch& batch, size_t paramOffset) final;

    // The State setters called by the GLState::Commands when a new state is assigned
-   void do_setStateFillMode(int32 mode);
-   void do_setStateCullMode(int32 mode);
-   void do_setStateFrontFaceClockwise(bool isClockwise);
-   void do_setStateDepthClampEnable(bool enable);
-   void do_setStateScissorEnable(bool enable);
-   void do_setStateMultisampleEnable(bool enable);
-   void do_setStateAntialiasedLineEnable(bool enable);
+   virtual void do_setStateFillMode(int32 mode) final;
+   virtual void do_setStateCullMode(int32 mode) final;
+   virtual void do_setStateFrontFaceClockwise(bool isClockwise) final;
+   virtual void do_setStateDepthClampEnable(bool enable) final;
+   virtual void do_setStateScissorEnable(bool enable) final;
+   virtual void do_setStateMultisampleEnable(bool enable) final;
+   virtual void do_setStateAntialiasedLineEnable(bool enable) final;
+   virtual void do_setStateDepthBias(Vec2 bias) final;
+   virtual void do_setStateDepthTest(State::DepthTest test) final;
+   virtual void do_setStateStencil(State::StencilActivation activation, State::StencilTest frontTest, State::StencilTest backTest) final;
+   virtual void do_setStateAlphaToCoverageEnable(bool enable) final;
+   virtual void do_setStateSampleMask(uint32 mask) final;
+   virtual void do_setStateBlend(State::BlendFunction blendFunction) final;
+   virtual void do_setStateColorWriteMask(uint32 mask) final;
+   virtual void do_setStateBlendFactor(Batch& batch, size_t paramOffset) final;
+   virtual void do_setStateScissorRect(Batch& batch, size_t paramOffset) final;

-   void do_setStateDepthBias(Vec2 bias);
-   void do_setStateDepthTest(State::DepthTest test);

-   void do_setStateStencil(State::StencilActivation activation, State::StencilTest frontTest, State::StencilTest backTest);

-   void do_setStateAlphaToCoverageEnable(bool enable);
-   void do_setStateSampleMask(uint32 mask);

-   void do_setStateBlend(State::BlendFunction blendFunction);

-   void do_setStateColorWriteMask(uint32 mask);

protected:
-   static const size_t INVALID_OFFSET = (size_t)-1;

-   bool _inRenderTransferPass;
+   virtual GLuint getFramebufferID(const FramebufferPointer& framebuffer) = 0;
+   virtual GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) = 0;

+   virtual GLuint getBufferID(const Buffer& buffer) = 0;
+   virtual GLBuffer* syncGPUObject(const Buffer& buffer) = 0;

+   virtual GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) = 0;
+   virtual GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) = 0;

+   virtual GLuint getQueryID(const QueryPointer& query) = 0;
+   virtual GLQuery* syncGPUObject(const Query& query) = 0;

+   static const size_t INVALID_OFFSET = (size_t)-1;
+   bool _inRenderTransferPass { false };
+   int32_t _uboAlignment { 0 };
+   int _currentDraw { -1 };

    void renderPassTransfer(Batch& batch);
    void renderPassDraw(Batch& batch);

    void setupStereoSide(int side);

    void initTextureTransferHelper();
    static void transferGPUObject(const TexturePointer& texture);
+   virtual void initInput() final;
+   virtual void killInput() final;
+   virtual void syncInputStateCache() final;
+   virtual void resetInputStage() final;
+   virtual void updateInput() = 0;

    static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;

    // Draw Stage
-   void do_draw(Batch& batch, size_t paramOffset);
-   void do_drawIndexed(Batch& batch, size_t paramOffset);
-   void do_drawInstanced(Batch& batch, size_t paramOffset);
-   void do_drawIndexedInstanced(Batch& batch, size_t paramOffset);
-   void do_multiDrawIndirect(Batch& batch, size_t paramOffset);
-   void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset);

    // Input Stage
-   void do_setInputFormat(Batch& batch, size_t paramOffset);
-   void do_setInputBuffer(Batch& batch, size_t paramOffset);
-   void do_setIndexBuffer(Batch& batch, size_t paramOffset);
-   void do_setIndirectBuffer(Batch& batch, size_t paramOffset);

-   void initInput();
-   void killInput();
-   void syncInputStateCache();
-   void updateInput();
-   void resetInputStage();
    struct InputStageState {
-       bool _invalidFormat = true;
+       bool _invalidFormat { true };
        Stream::FormatPointer _format;

        typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
-       ActivationCache _attributeActivation;
+       ActivationCache _attributeActivation { 0 };

        typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;
-       BuffersState _invalidBuffers;
+       BuffersState _invalidBuffers { 0 };

        Buffers _buffers;
        Offsets _bufferOffsets;
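This is the heart of the split: the draw entry points become pure virtual, so GL-version-specific subclasses supply them while the agnostic machinery above stays shared. A skeletal sketch of what a 4.1 backend then looks like — not code from this commit, only an illustration following the gpu::gl41 usage seen earlier in the diff:

    namespace gpu { namespace gl41 {

    class GLBackend : public gl::GLBackend {
    protected:
        // 4.1-specific implementations of the pure-virtual draw stage.
        void do_draw(Batch& batch, size_t paramOffset) override;
        void do_drawIndexed(Batch& batch, size_t paramOffset) override;
        void do_blit(Batch& batch, size_t paramOffset) override;
        // ...buffer/texture sync and transform transfer overrides elided...
    };

    } }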
@ -435,39 +205,23 @@ protected:
        glm::vec4 _colorAttribute{ 0.0f };

        BufferPointer _indexBuffer;
-       Offset _indexBufferOffset;
-       Type _indexBufferType;
+       Offset _indexBufferOffset { 0 };
+       Type _indexBufferType { UINT32 };

        BufferPointer _indirectBuffer;
        Offset _indirectBufferOffset{ 0 };
        Offset _indirectBufferStride{ 0 };

-       GLuint _defaultVAO;
+       GLuint _defaultVAO { 0 };

        InputStageState() :
-           _invalidFormat(true),
            _format(0),
-           _attributeActivation(0),
-           _invalidBuffers(0),
-           _buffers(_invalidBuffers.size(), BufferPointer(0)),
+           _buffers(_invalidBuffers.size()),
            _bufferOffsets(_invalidBuffers.size(), 0),
            _bufferStrides(_invalidBuffers.size(), 0),
-           _bufferVBOs(_invalidBuffers.size(), 0),
-           _indexBuffer(0),
-           _indexBufferOffset(0),
-           _indexBufferType(UINT32),
-           _defaultVAO(0)
-       {}
+           _bufferVBOs(_invalidBuffers.size(), 0) {}
    } _input;

    // Transform Stage
    void do_setModelTransform(Batch& batch, size_t paramOffset);
    void do_setViewTransform(Batch& batch, size_t paramOffset);
    void do_setProjectionTransform(Batch& batch, size_t paramOffset);
    void do_setViewportTransform(Batch& batch, size_t paramOffset);
    void do_setDepthRangeTransform(Batch& batch, size_t paramOffset);

-   void initTransform();
+   virtual void initTransform() = 0;
    void killTransform();
    // Synchronize the state cache of this Backend with the actual real state of the GL Context
    void syncTransformStateCache();
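The InputStageState rewrite above trades a long constructor initializer list for C++11 in-class default member initializers, leaving the constructor to size only the members that need a runtime length. The idiom in miniature (illustrative names, not from the diff):

    #include <cstddef>
    #include <vector>

    struct Example {
        bool _invalidFormat { true };     // defaulted in-class (NSDMI)
        unsigned int _defaultVAO { 0 };
        std::vector<int> _offsets;        // needs a runtime length...
        explicit Example(size_t slots) : _offsets(slots, 0) {}  // ...so only it stays in the ctor
    };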
@ -505,153 +259,74 @@ protected:
        void preUpdate(size_t commandIndex, const StereoState& stereo);
        void update(size_t commandIndex, const StereoState& stereo) const;
        void bindCurrentCamera(int stereoSide) const;
-       void transfer(const Batch& batch) const;
    } _transform;

-   int32_t _uboAlignment{ 0 };
+   virtual void transferTransformState(const Batch& batch) const = 0;

    // Uniform Stage
    void do_setUniformBuffer(Batch& batch, size_t paramOffset);
+   struct UniformStageState {
+       std::array<BufferPointer, MAX_NUM_UNIFORM_BUFFERS> _buffers;
+       //Buffers _buffers { };
+   } _uniform;

    void releaseUniformBuffer(uint32_t slot);
    void resetUniformStage();
-   struct UniformStageState {
-       Buffers _buffers;
-
-       UniformStageState():
-           _buffers(MAX_NUM_UNIFORM_BUFFERS, nullptr)
-       {}
-   } _uniform;

    // Resource Stage
    void do_setResourceTexture(Batch& batch, size_t paramOffset);

    // update resource cache and do the gl unbind call with the current gpu::Texture cached at slot s
    void releaseResourceTexture(uint32_t slot);

    void resetResourceStage();

    struct ResourceStageState {
-       Textures _textures;

+       std::array<TexturePointer, MAX_NUM_RESOURCE_TEXTURES> _textures;
+       //Textures _textures { { MAX_NUM_RESOURCE_TEXTURES } };
        int findEmptyTextureSlot() const;

-       ResourceStageState():
-           _textures(MAX_NUM_RESOURCE_TEXTURES, nullptr)
-       {}

    } _resource;

    size_t _commandIndex{ 0 };

    // Pipeline Stage
    void do_setPipeline(Batch& batch, size_t paramOffset);
-   void do_setStateBlendFactor(Batch& batch, size_t paramOffset);
-   void do_setStateScissorRect(Batch& batch, size_t paramOffset);

    // Standard update pipeline check that the current Program and current State are good to go for a draw
    void updatePipeline();
    // Force to reset all the state fields indicated by the 'toBeReset' signature
    void resetPipelineState(State::Signature toBeReset);
    // Synchronize the state cache of this Backend with the actual real state of the GL Context
    void syncPipelineStateCache();
    // Grab the actual gl state into its gpu::State equivalent. This is used by the above call syncPipelineStateCache()
    void getCurrentGLState(State::Data& state);
    void resetPipelineStage();

    struct PipelineStageState {

        PipelinePointer _pipeline;

-       GLuint _program;
-       GLShader* _programShader;
-       bool _invalidProgram;
+       GLuint _program { 0 };
+       GLShader* _programShader { nullptr };
+       bool _invalidProgram { false };

-       State::Data _stateCache;
-       State::Signature _stateSignatureCache;
+       State::Data _stateCache { State::DEFAULT };
+       State::Signature _stateSignatureCache { 0 };

-       GLState* _state;
-       bool _invalidState = false;

-       PipelineStageState() :
-           _pipeline(),
-           _program(0),
-           _programShader(nullptr),
-           _invalidProgram(false),
-           _stateCache(State::DEFAULT),
-           _stateSignatureCache(0),
-           _state(nullptr),
-           _invalidState(false)
-       {}
+       GLState* _state { nullptr };
+       bool _invalidState { false };
    } _pipeline;

    // Output stage
    void do_setFramebuffer(Batch& batch, size_t paramOffset);
    void do_clearFramebuffer(Batch& batch, size_t paramOffset);
    void do_blit(Batch& batch, size_t paramOffset);
    void do_generateTextureMips(Batch& batch, size_t paramOffset);

    // Synchronize the state cache of this Backend with the actual real state of the GL Context
    void syncOutputStateCache();
    void resetOutputStage();

    struct OutputStageState {

-       FramebufferPointer _framebuffer = nullptr;
-       GLuint _drawFBO = 0;

-       OutputStageState() {}
+       FramebufferPointer _framebuffer { nullptr };
+       GLuint _drawFBO { 0 };
    } _output;

    // Query section
    void do_beginQuery(Batch& batch, size_t paramOffset);
    void do_endQuery(Batch& batch, size_t paramOffset);
    void do_getQuery(Batch& batch, size_t paramOffset);

    void resetQueryStage();
    struct QueryStageState {

    };

    // Reset stages
    void do_resetStages(Batch& batch, size_t paramOffset);

    void do_runLambda(Batch& batch, size_t paramOffset);

    void do_startNamedCall(Batch& batch, size_t paramOffset);
    void do_stopNamedCall(Batch& batch, size_t paramOffset);

    void resetStages();

    // TODO: As long as we have gl calls explicitly issued from interface
    // code, we need to be able to record and batch these calls. The long
    // term strategy is to get rid of any GL calls in favor of the HIFI GPU API
    void do_glActiveBindTexture(Batch& batch, size_t paramOffset);

    void do_glUniform1i(Batch& batch, size_t paramOffset);
    void do_glUniform1f(Batch& batch, size_t paramOffset);
    void do_glUniform2f(Batch& batch, size_t paramOffset);
    void do_glUniform3f(Batch& batch, size_t paramOffset);
    void do_glUniform4f(Batch& batch, size_t paramOffset);
    void do_glUniform3fv(Batch& batch, size_t paramOffset);
    void do_glUniform4fv(Batch& batch, size_t paramOffset);
    void do_glUniform4iv(Batch& batch, size_t paramOffset);
    void do_glUniformMatrix4fv(Batch& batch, size_t paramOffset);

    void do_glColor4f(Batch& batch, size_t paramOffset);

    void do_pushProfileRange(Batch& batch, size_t paramOffset);
    void do_popProfileRange(Batch& batch, size_t paramOffset);

-   int _currentDraw { -1 };

    typedef void (GLBackend::*CommandCall)(Batch&, size_t);
    static CommandCall _commandCalls[Batch::NUM_COMMANDS];

    friend class GLState;
};

} }

Q_DECLARE_LOGGING_CATEGORY(gpugllogging)

#endif
@ -1,124 +0,0 @@
//
//  GLBackendBuffer.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 3/8/2015.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"

using namespace gpu;
using namespace gpu::gl;

GLuint allocateSingleBuffer() {
    GLuint result;
    glGenBuffers(1, &result);
    (void)CHECK_GL_ERROR();
    return result;
}

GLBackend::GLBuffer::GLBuffer(const Buffer& buffer, GLBuffer* original) :
    _buffer(allocateSingleBuffer()),
    _size((GLuint)buffer._sysmem.getSize()),
    _stamp(buffer._sysmem.getStamp()),
    _gpuBuffer(buffer) {
    glBindBuffer(GL_ARRAY_BUFFER, _buffer);
    if (GLEW_VERSION_4_4 || GLEW_ARB_buffer_storage) {
        glBufferStorage(GL_ARRAY_BUFFER, _size, nullptr, GL_DYNAMIC_STORAGE_BIT);
        (void)CHECK_GL_ERROR();
    } else {
        glBufferData(GL_ARRAY_BUFFER, _size, nullptr, GL_DYNAMIC_DRAW);
        (void)CHECK_GL_ERROR();
    }
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    if (original) {
        glBindBuffer(GL_COPY_WRITE_BUFFER, _buffer);
        glBindBuffer(GL_COPY_READ_BUFFER, original->_buffer);
        glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, original->_size);
        glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
        glBindBuffer(GL_COPY_READ_BUFFER, 0);
        (void)CHECK_GL_ERROR();
    }

    Backend::setGPUObject(buffer, this);
    Backend::incrementBufferGPUCount();
    Backend::updateBufferGPUMemoryUsage(0, _size);
}

GLBackend::GLBuffer::~GLBuffer() {
    if (_buffer != 0) {
        glDeleteBuffers(1, &_buffer);
    }
    Backend::updateBufferGPUMemoryUsage(_size, 0);
    Backend::decrementBufferGPUCount();
}

void GLBackend::GLBuffer::transfer() {
    glBindBuffer(GL_ARRAY_BUFFER, _buffer);
    (void)CHECK_GL_ERROR();
    GLintptr offset;
    GLsizeiptr size;
    size_t currentPage { 0 };
    auto data = _gpuBuffer.getSysmem().readData();
    while (getNextTransferBlock(offset, size, currentPage)) {
        glBufferSubData(GL_ARRAY_BUFFER, offset, size, data + offset);
        (void)CHECK_GL_ERROR();
    }
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    (void)CHECK_GL_ERROR();
    _gpuBuffer._flags &= ~Buffer::DIRTY;
}

bool GLBackend::GLBuffer::getNextTransferBlock(GLintptr& outOffset, GLsizeiptr& outSize, size_t& currentPage) const {
    size_t pageCount = _gpuBuffer._pages.size();
    // Advance to the first dirty page
    while (currentPage < pageCount && (0 == (Buffer::DIRTY & _gpuBuffer._pages[currentPage]))) {
        ++currentPage;
    }

    // If we got to the end, we're done
    if (currentPage >= pageCount) {
        return false;
    }

    // Advance to the next clean page
    outOffset = static_cast<GLintptr>(currentPage * _gpuBuffer._pageSize);
    while (currentPage < pageCount && (0 != (Buffer::DIRTY & _gpuBuffer._pages[currentPage]))) {
        _gpuBuffer._pages[currentPage] &= ~Buffer::DIRTY;
        ++currentPage;
    }
    outSize = static_cast<GLsizeiptr>((currentPage * _gpuBuffer._pageSize) - outOffset);
    return true;
}

GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
    GLBuffer* object = Backend::getGPUObject<GLBackend::GLBuffer>(buffer);

    // Has the storage size changed?
    if (!object || object->_stamp != buffer.getSysmem().getStamp()) {
        object = new GLBuffer(buffer, object);
    }

    if (0 != (buffer._flags & Buffer::DIRTY)) {
        object->transfer();
    }

    return object;
}

GLuint GLBackend::getBufferID(const Buffer& buffer) {
    GLBuffer* bo = GLBackend::syncGPUObject(buffer);
    if (bo) {
        return bo->_buffer;
    } else {
        return 0;
    }
}
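getNextTransferBlock() in the deleted file is a classic dirty-page run scan: skip clean pages, then swallow a maximal run of dirty ones, clearing flags as it goes, so each glBufferSubData uploads one contiguous block. A standalone sketch with plain integers (page size and flag value are illustrative):

    #include <cstdint>
    #include <vector>

    // Returns false when no dirty run remains; otherwise yields [offset, offset+size)
    // and clears the DIRTY bits it consumed, like the method above.
    bool nextDirtyRun(std::vector<uint8_t>& pages, size_t pageSize,
                      size_t& page, size_t& offset, size_t& size) {
        const uint8_t DIRTY = 0x01;                  // illustrative flag value
        while (page < pages.size() && !(pages[page] & DIRTY)) {
            ++page;                                  // skip clean pages
        }
        if (page >= pages.size()) {
            return false;                            // nothing left to upload
        }
        offset = page * pageSize;
        while (page < pages.size() && (pages[page] & DIRTY)) {
            pages[page] &= ~DIRTY;                   // consume the run
            ++page;
        }
        size = page * pageSize - offset;
        return true;
    }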
@ -9,7 +9,7 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"
#include "GLShared.h"

using namespace gpu;
using namespace gpu::gl;

@ -59,9 +59,6 @@ void GLBackend::do_setInputBuffer(Batch& batch, size_t paramOffset) {
    }
}

void GLBackend::initInput() {
    if (!_input._defaultVAO) {
        glGenVertexArrays(1, &_input._defaultVAO);

@ -88,176 +85,6 @@ void GLBackend::syncInputStateCache() {
    glBindVertexArray(_input._defaultVAO);
}

// Core 41 doesn't expose the features to really separate the vertex format from the vertex buffers binding
// Core 43 does :)
// FIXME crashing problem with glVertexBindingDivisor / glVertexAttribFormat
#if 1 || (GPU_INPUT_PROFILE == GPU_CORE_41)
#define NO_SUPPORT_VERTEX_ATTRIB_FORMAT
#else
#define SUPPORT_VERTEX_ATTRIB_FORMAT
#endif
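For context, a minimal sketch of the two binding models that updateInput() below switches between; the slot, channel, vbo, and stride values are made up for illustration, and the calls are the stock GL entry points:

    // Separated path (GL 4.3+): describe the format once, then rebind buffers
    // cheaply without re-describing the attributes.
    glVertexAttribFormat(slot, 3, GL_FLOAT, GL_FALSE, /*relativeOffset*/ 0);
    glVertexAttribBinding(slot, channel);
    glBindVertexBuffer(channel, vbo, /*offset*/ 0, /*stride*/ 12);

    // Fused path (GL 4.1): the format and the currently bound GL_ARRAY_BUFFER
    // are captured together, so every buffer change repeats the description.
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(slot, 3, GL_FLOAT, GL_FALSE, 12, (const GLvoid*)0);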

void GLBackend::updateInput() {
#if defined(SUPPORT_VERTEX_ATTRIB_FORMAT)
    if (_input._invalidFormat) {

        InputStageState::ActivationCache newActivation;

        // Assign the vertex format required
        if (_input._format) {
            for (auto& it : _input._format->getAttributes()) {
                const Stream::Attribute& attrib = (it).second;

                GLuint slot = attrib._slot;
                GLuint count = attrib._element.getLocationScalarCount();
                uint8_t locationCount = attrib._element.getLocationCount();
                GLenum type = _elementTypeToGLType[attrib._element.getType()];
                GLuint offset = attrib._offset;
                GLboolean isNormalized = attrib._element.isNormalized();

                GLenum perLocationSize = attrib._element.getLocationSize();

                for (size_t locNum = 0; locNum < locationCount; ++locNum) {
                    newActivation.set(slot + locNum);
                    glVertexAttribFormat(slot + locNum, count, type, isNormalized, offset + locNum * perLocationSize);
                    glVertexAttribBinding(slot + locNum, attrib._channel);
                }
                glVertexBindingDivisor(attrib._channel, attrib._frequency);
            }
            (void) CHECK_GL_ERROR();
        }

        // Manage activation: reconcile what was enabled with what is expected now
        for (size_t i = 0; i < newActivation.size(); i++) {
            bool newState = newActivation[i];
            if (newState != _input._attributeActivation[i]) {
                if (newState) {
                    glEnableVertexAttribArray(i);
                } else {
                    glDisableVertexAttribArray(i);
                }
                _input._attributeActivation.flip(i);
            }
        }
        (void) CHECK_GL_ERROR();

        _input._invalidFormat = false;
        _stats._ISNumFormatChanges++;
    }

    if (_input._invalidBuffers.any()) {
        int numBuffers = _input._buffers.size();
        auto buffer = _input._buffers.data();
        auto vbo = _input._bufferVBOs.data();
        auto offset = _input._bufferOffsets.data();
        auto stride = _input._bufferStrides.data();

        for (int bufferNum = 0; bufferNum < numBuffers; bufferNum++) {
            if (_input._invalidBuffers.test(bufferNum)) {
                glBindVertexBuffer(bufferNum, (*vbo), (*offset), (*stride));
            }
            buffer++;
            vbo++;
            offset++;
            stride++;
        }
        _input._invalidBuffers.reset();
        (void) CHECK_GL_ERROR();
    }
#else
    if (_input._invalidFormat || _input._invalidBuffers.any()) {

        if (_input._invalidFormat) {
            InputStageState::ActivationCache newActivation;

            _stats._ISNumFormatChanges++;

            // Check expected activation
            if (_input._format) {
                for (auto& it : _input._format->getAttributes()) {
                    const Stream::Attribute& attrib = (it).second;
                    uint8_t locationCount = attrib._element.getLocationCount();
                    for (int i = 0; i < locationCount; ++i) {
                        newActivation.set(attrib._slot + i);
                    }
                }
            }

            // Manage activation: reconcile what was enabled with what is expected now
            for (unsigned int i = 0; i < newActivation.size(); i++) {
                bool newState = newActivation[i];
                if (newState != _input._attributeActivation[i]) {

                    if (newState) {
                        glEnableVertexAttribArray(i);
                    } else {
                        glDisableVertexAttribArray(i);
                    }
                    (void) CHECK_GL_ERROR();

                    _input._attributeActivation.flip(i);
                }
            }
        }

        // now we need to bind the buffers and assign the attrib pointers
        if (_input._format) {
            const Buffers& buffers = _input._buffers;
            const Offsets& offsets = _input._bufferOffsets;
            const Offsets& strides = _input._bufferStrides;

            const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
            auto& inputChannels = _input._format->getChannels();
            _stats._ISNumInputBufferChanges++;

            GLuint boundVBO = 0;
            for (auto& channelIt : inputChannels) {
                const Stream::Format::ChannelMap::value_type::second_type& channel = (channelIt).second;
                if ((channelIt).first < buffers.size()) {
                    int bufferNum = (channelIt).first;

                    if (_input._invalidBuffers.test(bufferNum) || _input._invalidFormat) {
                        // GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
                        GLuint vbo = _input._bufferVBOs[bufferNum];
                        if (boundVBO != vbo) {
                            glBindBuffer(GL_ARRAY_BUFFER, vbo);
                            (void) CHECK_GL_ERROR();
                            boundVBO = vbo;
                        }
                        _input._invalidBuffers[bufferNum] = false;

                        for (unsigned int i = 0; i < channel._slots.size(); i++) {
                            const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
                            GLuint slot = attrib._slot;
                            GLuint count = attrib._element.getLocationScalarCount();
                            uint8_t locationCount = attrib._element.getLocationCount();
                            GLenum type = _elementTypeToGLType[attrib._element.getType()];
                            // GLenum perLocationStride = strides[bufferNum];
                            GLenum perLocationStride = attrib._element.getLocationSize();
                            GLuint stride = (GLuint)strides[bufferNum];
                            GLuint pointer = (GLuint)(attrib._offset + offsets[bufferNum]);
                            GLboolean isNormalized = attrib._element.isNormalized();

                            for (size_t locNum = 0; locNum < locationCount; ++locNum) {
                                glVertexAttribPointer(slot + (GLuint)locNum, count, type, isNormalized, stride,
                                    reinterpret_cast<GLvoid*>(pointer + perLocationStride * (GLuint)locNum));
                                glVertexAttribDivisor(slot + (GLuint)locNum, attrib._frequency);
                            }

                            // TODO: Support properly the IAttrib version

                            (void) CHECK_GL_ERROR();
                        }
                    }
                }
            }
        }
        // everything format related should be in sync now
        _input._invalidFormat = false;
    }
#endif
}

void GLBackend::resetInputStage() {
    // Reset index buffer
    _input._indexBufferType = UINT32;

@ -9,180 +9,14 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLShared.h"
#include "GLFramebuffer.h"

#include <QtGui/QImage>

#include "GLBackendShared.h"

using namespace gpu;
using namespace gpu::gl;

GLBackend::GLFramebuffer::GLFramebuffer() {}

GLBackend::GLFramebuffer::~GLFramebuffer() {
    if (_fbo != 0) {
        glDeleteFramebuffers(1, &_fbo);
    }
}

GLBackend::GLFramebuffer* GLBackend::syncGPUObject(const Framebuffer& framebuffer) {
    GLFramebuffer* object = Backend::getGPUObject<GLBackend::GLFramebuffer>(framebuffer);

    bool needsUpdate { false };
    if (!object ||
        framebuffer.getDepthStamp() != object->_depthStamp ||
        framebuffer.getColorStamps() != object->_colorStamps) {
        needsUpdate = true;
    }

    // If the GPU object is already created and in sync
    if (!needsUpdate) {
        return object;
    } else if (framebuffer.isEmpty()) {
        // No framebuffer definition yet, so there is nothing to sync
        return nullptr;
    }

    // need to have a gpu object?
    if (!object) {
        // All is green, assign the gpuobject to the Framebuffer
        object = new GLFramebuffer();
        Backend::setGPUObject(framebuffer, object);
        glGenFramebuffers(1, &object->_fbo);
        (void)CHECK_GL_ERROR();
    }

    if (needsUpdate) {
        GLint currentFBO = -1;
        glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &currentFBO);
        glBindFramebuffer(GL_FRAMEBUFFER, object->_fbo);

        GLTexture* gltexture = nullptr;
        TexturePointer surface;
        if (framebuffer.getColorStamps() != object->_colorStamps) {
            if (framebuffer.hasColor()) {
                object->_colorBuffers.clear();
                static const GLenum colorAttachments[] = {
                    GL_COLOR_ATTACHMENT0,
                    GL_COLOR_ATTACHMENT1,
                    GL_COLOR_ATTACHMENT2,
                    GL_COLOR_ATTACHMENT3,
                    GL_COLOR_ATTACHMENT4,
                    GL_COLOR_ATTACHMENT5,
                    GL_COLOR_ATTACHMENT6,
                    GL_COLOR_ATTACHMENT7,
                    GL_COLOR_ATTACHMENT8,
                    GL_COLOR_ATTACHMENT9,
                    GL_COLOR_ATTACHMENT10,
                    GL_COLOR_ATTACHMENT11,
                    GL_COLOR_ATTACHMENT12,
                    GL_COLOR_ATTACHMENT13,
                    GL_COLOR_ATTACHMENT14,
                    GL_COLOR_ATTACHMENT15 };

                int unit = 0;
                for (auto& b : framebuffer.getRenderBuffers()) {
                    surface = b._texture;
                    if (surface) {
                        gltexture = GLBackend::syncGPUObject(surface, false); // Grab the gltexture and don't transfer
                    } else {
                        gltexture = nullptr;
                    }

                    if (gltexture) {
                        glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachments[unit], GL_TEXTURE_2D, gltexture->_texture, 0);
                        object->_colorBuffers.push_back(colorAttachments[unit]);
                    } else {
                        glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachments[unit], GL_TEXTURE_2D, 0, 0);
                    }
                    unit++;
                }
            }
            object->_colorStamps = framebuffer.getColorStamps();
        }

        GLenum attachment = GL_DEPTH_STENCIL_ATTACHMENT;
        if (!framebuffer.hasStencil()) {
            attachment = GL_DEPTH_ATTACHMENT;
        } else if (!framebuffer.hasDepth()) {
            attachment = GL_STENCIL_ATTACHMENT;
        }

        if (framebuffer.getDepthStamp() != object->_depthStamp) {
            auto surface = framebuffer.getDepthStencilBuffer();
            if (framebuffer.hasDepthStencil() && surface) {
                gltexture = GLBackend::syncGPUObject(surface, false); // Grab the gltexture and don't transfer
            }

            if (gltexture) {
                glFramebufferTexture2D(GL_FRAMEBUFFER, attachment, GL_TEXTURE_2D, gltexture->_texture, 0);
            } else {
                glFramebufferTexture2D(GL_FRAMEBUFFER, attachment, GL_TEXTURE_2D, 0, 0);
            }
            object->_depthStamp = framebuffer.getDepthStamp();
        }

        // Last but not least, define where we draw
        if (!object->_colorBuffers.empty()) {
            glDrawBuffers((GLsizei)object->_colorBuffers.size(), object->_colorBuffers.data());
        } else {
            glDrawBuffer(GL_NONE);
        }

        // Now check for completeness
        GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);

        // restore the current framebuffer
        if (currentFBO != -1) {
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, currentFBO);
        }

        bool result = false;
        switch (status) {
            case GL_FRAMEBUFFER_COMPLETE:
                // Success!
                result = true;
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
                qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
                qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
                qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
                qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
                break;
            case GL_FRAMEBUFFER_UNSUPPORTED:
                qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
                break;
        }
        if (!result && object->_fbo) {
            glDeleteFramebuffers(1, &object->_fbo);
            return nullptr;
        }
    }

    return object;
}

GLuint GLBackend::getFramebufferID(const FramebufferPointer& framebuffer) {
    if (!framebuffer) {
        return 0;
    }
    GLFramebuffer* object = GLBackend::syncGPUObject(*framebuffer);
    if (object) {
        return object->_fbo;
    } else {
        return 0;
    }
}
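getBufferID() earlier, getFramebufferID() here, and getQueryID() further down all share the same sync-then-return shape; condensed, the pattern is just the following (the comment spells out the failure semantics the originals leave implicit):

    // The GL-side object is created or refreshed by syncGPUObject() before its
    // handle is handed out; a failed sync maps to 0, which GL treats as "no
    // object" (or, for framebuffers, the default framebuffer).
    GLFramebuffer* object = GLBackend::syncGPUObject(*framebuffer);
    return object ? object->_fbo : 0;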

void GLBackend::syncOutputStateCache() {
    GLint currentFBO;
    glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &currentFBO);

@ -303,44 +137,6 @@ void GLBackend::do_clearFramebuffer(Batch& batch, size_t paramOffset) {
    (void) CHECK_GL_ERROR();
}

void GLBackend::do_blit(Batch& batch, size_t paramOffset) {
    auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
    Vec4i srcvp;
    for (auto i = 0; i < 4; ++i) {
        srcvp[i] = batch._params[paramOffset + 1 + i]._int;
    }

    auto dstframebuffer = batch._framebuffers.get(batch._params[paramOffset + 5]._uint);
    Vec4i dstvp;
    for (auto i = 0; i < 4; ++i) {
        dstvp[i] = batch._params[paramOffset + 6 + i]._int;
    }

    // Assign the dest framebuffer if not bound already
    auto newDrawFBO = getFramebufferID(dstframebuffer);
    if (_output._drawFBO != newDrawFBO) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, newDrawFBO);
    }

    // always bind the read fbo
    glBindFramebuffer(GL_READ_FRAMEBUFFER, getFramebufferID(srcframebuffer));

    // Blit!
    glBlitFramebuffer(srcvp.x, srcvp.y, srcvp.z, srcvp.w,
        dstvp.x, dstvp.y, dstvp.z, dstvp.w,
        GL_COLOR_BUFFER_BIT, GL_LINEAR);

    // Always reset the read fbo to 0
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

    // Restore the draw fbo if it changed
    if (_output._drawFBO != newDrawFBO) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _output._drawFBO);
    }

    (void) CHECK_GL_ERROR();
}

void GLBackend::downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) {
    auto readFBO = getFramebufferID(srcFramebuffer);
    if (srcFramebuffer && readFBO) {
@ -9,54 +9,16 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"
#include "GLShared.h"
#include "GLPipeline.h"
#include "GLShader.h"
#include "GLState.h"
#include "GLBuffer.h"
#include "GLTexture.h"

using namespace gpu;
using namespace gpu::gl;

GLBackend::GLPipeline::GLPipeline() :
    _program(nullptr),
    _state(nullptr)
{}

GLBackend::GLPipeline::~GLPipeline() {
    _program = nullptr;
    _state = nullptr;
}

GLBackend::GLPipeline* GLBackend::syncGPUObject(const Pipeline& pipeline) {
    GLPipeline* object = Backend::getGPUObject<GLBackend::GLPipeline>(pipeline);

    // If GPU object already created then good
    if (object) {
        return object;
    }

    // No object allocated yet, let's see if it's worth it...
    ShaderPointer shader = pipeline.getProgram();
    GLShader* programObject = GLBackend::syncGPUObject((*shader));
    if (programObject == nullptr) {
        return nullptr;
    }

    StatePointer state = pipeline.getState();
    GLState* stateObject = GLBackend::syncGPUObject((*state));
    if (stateObject == nullptr) {
        return nullptr;
    }

    // Program and state are valid, we can create the pipeline object
    if (!object) {
        object = new GLPipeline();
        Backend::setGPUObject(pipeline, object);
    }

    object->_program = programObject;
    object->_state = stateObject;

    return object;
}

void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
    PipelinePointer pipeline = batch._pipelines.get(batch._params[paramOffset + 0]._uint);

@ -78,7 +40,7 @@ void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
        _pipeline._state = nullptr;
        _pipeline._invalidState = true;
    } else {
        auto pipelineObject = syncGPUObject((*pipeline));
        auto pipelineObject = GLPipeline::sync(*pipeline);
        if (!pipelineObject) {
            return;
        }

@ -155,14 +117,12 @@ void GLBackend::resetPipelineStage() {
    glUseProgram(0);
}

void GLBackend::releaseUniformBuffer(uint32_t slot) {
    auto& buf = _uniform._buffers[slot];
    if (buf) {
        auto* object = Backend::getGPUObject<GLBackend::GLBuffer>(*buf);
        auto* object = Backend::getGPUObject<GLBuffer>(*buf);
        if (object) {
            glBindBufferBase(GL_UNIFORM_BUFFER, slot, 0); // RELEASE

            (void) CHECK_GL_ERROR();
        }
        buf.reset();

@ -181,9 +141,6 @@ void GLBackend::do_setUniformBuffer(Batch& batch, size_t paramOffset) {
    GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
    GLsizeiptr rangeSize = batch._params[paramOffset + 0]._uint;

    if (!uniformBuffer) {
        releaseUniformBuffer(slot);
        return;

@ -195,7 +152,7 @@ void GLBackend::do_setUniformBuffer(Batch& batch, size_t paramOffset) {
    }

    // Sync BufferObject
    auto* object = GLBackend::syncGPUObject(*uniformBuffer);
    auto* object = syncGPUObject(*uniformBuffer);
    if (object) {
        glBindBufferRange(GL_UNIFORM_BUFFER, slot, object->_buffer, rangeStart, rangeSize);

@ -210,19 +167,17 @@
void GLBackend::releaseResourceTexture(uint32_t slot) {
    auto& tex = _resource._textures[slot];
    if (tex) {
        auto* object = Backend::getGPUObject<GLBackend::GLTexture>(*tex);
        auto* object = Backend::getGPUObject<GLTexture>(*tex);
        if (object) {
            GLuint target = object->_target;
            glActiveTexture(GL_TEXTURE0 + slot);
            glBindTexture(target, 0); // RELEASE

            (void) CHECK_GL_ERROR();
        }
        tex.reset();
    }
}

void GLBackend::resetResourceStage() {
    for (uint32_t i = 0; i < _resource._textures.size(); i++) {
        releaseResourceTexture(i);

@ -246,7 +201,7 @@ void GLBackend::do_setResourceTexture(Batch& batch, size_t paramOffset) {
    _stats._RSNumTextureBounded++;

    // Always make sure the GLObject is in sync
    GLTexture* object = GLBackend::syncGPUObject(resourceTexture);
    GLTexture* object = syncGPUObject(resourceTexture);
    if (object) {
        GLuint to = object->_texture;
        GLuint target = object->_target;

@ -9,58 +9,11 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"
#include "GLQuery.h"

using namespace gpu;
using namespace gpu::gl;

GLBackend::GLQuery::GLQuery() {}

GLBackend::GLQuery::~GLQuery() {
    if (_qo != 0) {
        glDeleteQueries(1, &_qo);
    }
}

GLBackend::GLQuery* GLBackend::syncGPUObject(const Query& query) {
    GLQuery* object = Backend::getGPUObject<GLBackend::GLQuery>(query);

    // If GPU object already created and in sync
    if (object) {
        return object;
    }

    // need to have a gpu object?
    if (!object) {
        GLuint qo;
        glGenQueries(1, &qo);
        (void) CHECK_GL_ERROR();
        GLuint64 result = -1;

        // All is green, assign the gpuobject to the Query
        object = new GLQuery();
        object->_qo = qo;
        object->_result = result;
        Backend::setGPUObject(query, object);
    }

    return object;
}

GLuint GLBackend::getQueryID(const QueryPointer& query) {
    if (!query) {
        return 0;
    }
    GLQuery* object = GLBackend::syncGPUObject(*query);
    if (object) {
        return object->_qo;
    } else {
        return 0;
    }
}

void GLBackend::do_beginQuery(Batch& batch, size_t paramOffset) {
    auto query = batch._queries.get(batch._params[paramOffset]._uint);
    GLQuery* glquery = syncGPUObject(*query);

@ -94,4 +47,3 @@ void GLBackend::do_getQuery(Batch& batch, size_t paramOffset) {

void GLBackend::resetQueryStage() {
}

@ -1,68 +0,0 @@
//
//  GLBackendShared.h
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 1/22/2014.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_GLBackend_Shared_h
#define hifi_gpu_GLBackend_Shared_h

#include <gl/Config.h>
#include <gpu/Forward.h>
#include <gpu/Format.h>

namespace gpu { namespace gl {

static const GLenum _primitiveToGLmode[gpu::NUM_PRIMITIVES] = {
    GL_POINTS,
    GL_LINES,
    GL_LINE_STRIP,
    GL_TRIANGLES,
    GL_TRIANGLE_STRIP,
    GL_TRIANGLE_FAN,
};

static const GLenum _elementTypeToGLType[gpu::NUM_TYPES] = {
    GL_FLOAT,
    GL_INT,
    GL_UNSIGNED_INT,
    GL_HALF_FLOAT,
    GL_SHORT,
    GL_UNSIGNED_SHORT,
    GL_BYTE,
    GL_UNSIGNED_BYTE,
    // Normalized values
    GL_INT,
    GL_UNSIGNED_INT,
    GL_SHORT,
    GL_UNSIGNED_SHORT,
    GL_BYTE,
    GL_UNSIGNED_BYTE
};

class GLTexelFormat {
public:
    GLenum internalFormat;
    GLenum format;
    GLenum type;

    static GLTexelFormat evalGLTexelFormat(const gpu::Element& dstFormat) {
        return evalGLTexelFormat(dstFormat, dstFormat);
    }
    static GLTexelFormat evalGLTexelFormatInternal(const gpu::Element& dstFormat);

    static GLTexelFormat evalGLTexelFormat(const gpu::Element& dstFormat, const gpu::Element& srcFormat);
};

bool checkGLError(const char* name = nullptr);
bool checkGLErrorDebug(const char* name = nullptr);

} }

#define CHECK_GL_ERROR() gpu::gl::checkGLErrorDebug(__FUNCTION__)

#endif
@ -9,238 +9,11 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"
#include "GLState.h"

using namespace gpu;
using namespace gpu::gl;

GLBackend::GLState::GLState()
{}

GLBackend::GLState::~GLState() {
}

typedef GLBackend::GLState::Command Command;
typedef GLBackend::GLState::CommandPointer CommandPointer;
typedef GLBackend::GLState::Command1<uint32> Command1U;
typedef GLBackend::GLState::Command1<int32> Command1I;
typedef GLBackend::GLState::Command1<bool> Command1B;
typedef GLBackend::GLState::Command1<Vec2> CommandDepthBias;
typedef GLBackend::GLState::Command1<State::DepthTest> CommandDepthTest;
typedef GLBackend::GLState::Command3<State::StencilActivation, State::StencilTest, State::StencilTest> CommandStencil;
typedef GLBackend::GLState::Command1<State::BlendFunction> CommandBlend;

const GLBackend::GLState::Commands makeResetStateCommands();
const GLBackend::GLState::Commands GLBackend::GLState::_resetStateCommands = makeResetStateCommands();

// NOTE: This must stay in sync with the ordering of the State::Field enum
const GLBackend::GLState::Commands makeResetStateCommands() {
    // Since State::DEFAULT is a static defined in another .cpp the initialisation order is random
    // and we have a 50/50 chance that State::DEFAULT is not yet initialized.
    // Since State::DEFAULT = State::Data() it is much easier to not use the actual State::DEFAULT
    // but another State::Data object with a default initialization.
    const State::Data DEFAULT = State::Data();

    auto depthBiasCommand = std::make_shared<CommandDepthBias>(&GLBackend::do_setStateDepthBias,
        Vec2(DEFAULT.depthBias, DEFAULT.depthBiasSlopeScale));
    auto stencilCommand = std::make_shared<CommandStencil>(&GLBackend::do_setStateStencil, DEFAULT.stencilActivation,
        DEFAULT.stencilTestFront, DEFAULT.stencilTestBack);

    // The state commands to reset to default;
    // WARNING: their order depends on the order of the State::Field enum
    return {
        std::make_shared<Command1I>(&GLBackend::do_setStateFillMode, DEFAULT.fillMode),
        std::make_shared<Command1I>(&GLBackend::do_setStateCullMode, DEFAULT.cullMode),
        std::make_shared<Command1B>(&GLBackend::do_setStateFrontFaceClockwise, DEFAULT.frontFaceClockwise),
        std::make_shared<Command1B>(&GLBackend::do_setStateDepthClampEnable, DEFAULT.depthClampEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateScissorEnable, DEFAULT.scissorEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateMultisampleEnable, DEFAULT.multisampleEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateAntialiasedLineEnable, DEFAULT.antialisedLineEnable),

        // Depth bias has 2 fields in State but really one call in GLBackend
        CommandPointer(depthBiasCommand),
        CommandPointer(depthBiasCommand),

        std::make_shared<CommandDepthTest>(&GLBackend::do_setStateDepthTest, DEFAULT.depthTest),

        // Stencil has 3 fields in State but really one call in GLBackend
        CommandPointer(stencilCommand),
        CommandPointer(stencilCommand),
        CommandPointer(stencilCommand),

        std::make_shared<Command1U>(&GLBackend::do_setStateSampleMask, DEFAULT.sampleMask),

        std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, DEFAULT.alphaToCoverageEnable),

        std::make_shared<CommandBlend>(&GLBackend::do_setStateBlend, DEFAULT.blendFunction),

        std::make_shared<Command1U>(&GLBackend::do_setStateColorWriteMask, DEFAULT.colorWriteMask)
    };
}
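Because this list mirrors the State::Field enum purely by position, a compile-time guard could catch the two drifting apart; a sketch, where the literal 17 is the count of entries enumerated above (two depth-bias slots and three stencil slots included) and is not asserted anywhere by the original code:

    // Sketch: fails to compile if a field is added to State::Field without a
    // matching entry in makeResetStateCommands().
    static_assert(State::NUM_FIELDS == 17,
        "makeResetStateCommands() must be updated when State::Field changes");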

void generateFillMode(GLBackend::GLState::Commands& commands, State::FillMode fillMode) {
    commands.push_back(std::make_shared<Command1I>(&GLBackend::do_setStateFillMode, int32(fillMode)));
}

void generateCullMode(GLBackend::GLState::Commands& commands, State::CullMode cullMode) {
    commands.push_back(std::make_shared<Command1I>(&GLBackend::do_setStateCullMode, int32(cullMode)));
}

void generateFrontFaceClockwise(GLBackend::GLState::Commands& commands, bool isClockwise) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateFrontFaceClockwise, isClockwise));
}

void generateDepthClampEnable(GLBackend::GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateDepthClampEnable, enable));
}

void generateScissorEnable(GLBackend::GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateScissorEnable, enable));
}

void generateMultisampleEnable(GLBackend::GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateMultisampleEnable, enable));
}

void generateAntialiasedLineEnable(GLBackend::GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateAntialiasedLineEnable, enable));
}

void generateDepthBias(GLBackend::GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandDepthBias>(&GLBackend::do_setStateDepthBias, Vec2(state.getDepthBias(), state.getDepthBiasSlopeScale())));
}

void generateDepthTest(GLBackend::GLState::Commands& commands, const State::DepthTest& test) {
    commands.push_back(std::make_shared<CommandDepthTest>(&GLBackend::do_setStateDepthTest, int32(test.getRaw())));
}

void generateStencil(GLBackend::GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandStencil>(&GLBackend::do_setStateStencil, state.getStencilActivation(), state.getStencilTestFront(), state.getStencilTestBack()));
}

void generateAlphaToCoverageEnable(GLBackend::GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, enable));
}

void generateSampleMask(GLBackend::GLState::Commands& commands, uint32 mask) {
    commands.push_back(std::make_shared<Command1U>(&GLBackend::do_setStateSampleMask, mask));
}

void generateBlend(GLBackend::GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandBlend>(&GLBackend::do_setStateBlend, state.getBlendFunction()));
}

void generateColorWriteMask(GLBackend::GLState::Commands& commands, uint32 mask) {
    commands.push_back(std::make_shared<Command1U>(&GLBackend::do_setStateColorWriteMask, mask));
}

GLBackend::GLState* GLBackend::syncGPUObject(const State& state) {
    GLState* object = Backend::getGPUObject<GLBackend::GLState>(state);

    // If GPU object already created then good
    if (object) {
        return object;
    }

    // Else allocate and create the GLState
    if (!object) {
        object = new GLState();
        Backend::setGPUObject(state, object);
    }

    // here, we need to regenerate something so let's do it all
    object->_commands.clear();
    object->_stamp = state.getStamp();
    object->_signature = state.getSignature();

    bool depthBias = false;
    bool stencilState = false;

    // go through the list of state fields in the State and record the corresponding gl command
    for (int i = 0; i < State::NUM_FIELDS; i++) {
        if (state.getSignature()[i]) {
            switch(i) {
                case State::FILL_MODE: {
                    generateFillMode(object->_commands, state.getFillMode());
                    break;
                }
                case State::CULL_MODE: {
                    generateCullMode(object->_commands, state.getCullMode());
                    break;
                }
                case State::DEPTH_BIAS:
                case State::DEPTH_BIAS_SLOPE_SCALE: {
                    depthBias = true;
                    break;
                }
                case State::FRONT_FACE_CLOCKWISE: {
                    generateFrontFaceClockwise(object->_commands, state.isFrontFaceClockwise());
                    break;
                }
                case State::DEPTH_CLAMP_ENABLE: {
                    generateDepthClampEnable(object->_commands, state.isDepthClampEnable());
                    break;
                }
                case State::SCISSOR_ENABLE: {
                    generateScissorEnable(object->_commands, state.isScissorEnable());
                    break;
                }
                case State::MULTISAMPLE_ENABLE: {
                    generateMultisampleEnable(object->_commands, state.isMultisampleEnable());
                    break;
                }
                case State::ANTIALISED_LINE_ENABLE: {
                    generateAntialiasedLineEnable(object->_commands, state.isAntialiasedLineEnable());
                    break;
                }
                case State::DEPTH_TEST: {
                    generateDepthTest(object->_commands, state.getDepthTest());
                    break;
                }

                case State::STENCIL_ACTIVATION:
                case State::STENCIL_TEST_FRONT:
                case State::STENCIL_TEST_BACK: {
                    stencilState = true;
                    break;
                }

                case State::SAMPLE_MASK: {
                    generateSampleMask(object->_commands, state.getSampleMask());
                    break;
                }
                case State::ALPHA_TO_COVERAGE_ENABLE: {
                    generateAlphaToCoverageEnable(object->_commands, state.isAlphaToCoverageEnabled());
                    break;
                }

                case State::BLEND_FUNCTION: {
                    generateBlend(object->_commands, state);
                    break;
                }

                case State::COLOR_WRITE_MASK: {
                    generateColorWriteMask(object->_commands, state.getColorWriteMask());
                    break;
                }
            }
        }
    }

    if (depthBias) {
        generateDepthBias(object->_commands, state);
    }

    if (stencilState) {
        generateStencil(object->_commands, state);
    }

    return object;
}

void GLBackend::resetPipelineState(State::Signature nextSignature) {
    auto currentNotSignature = ~_pipeline._stateSignatureCache;
    auto nextNotSignature = ~nextSignature;

@ -255,230 +28,6 @@ void GLBackend::resetPipelineState(State::Signature nextSignature) {
    }
}

ComparisonFunction comparisonFuncFromGL(GLenum func) {
    if (func == GL_NEVER) {
        return NEVER;
    } else if (func == GL_LESS) {
        return LESS;
    } else if (func == GL_EQUAL) {
        return EQUAL;
    } else if (func == GL_LEQUAL) {
        return LESS_EQUAL;
    } else if (func == GL_GREATER) {
        return GREATER;
    } else if (func == GL_NOTEQUAL) {
        return NOT_EQUAL;
    } else if (func == GL_GEQUAL) {
        return GREATER_EQUAL;
    } else if (func == GL_ALWAYS) {
        return ALWAYS;
    }

    return ALWAYS;
}

State::StencilOp stencilOpFromGL(GLenum stencilOp) {
    if (stencilOp == GL_KEEP) {
        return State::STENCIL_OP_KEEP;
    } else if (stencilOp == GL_ZERO) {
        return State::STENCIL_OP_ZERO;
    } else if (stencilOp == GL_REPLACE) {
        return State::STENCIL_OP_REPLACE;
    } else if (stencilOp == GL_INCR_WRAP) {
        return State::STENCIL_OP_INCR_SAT;
    } else if (stencilOp == GL_DECR_WRAP) {
        return State::STENCIL_OP_DECR_SAT;
    } else if (stencilOp == GL_INVERT) {
        return State::STENCIL_OP_INVERT;
    } else if (stencilOp == GL_INCR) {
        return State::STENCIL_OP_INCR;
    } else if (stencilOp == GL_DECR) {
        return State::STENCIL_OP_DECR;
    }

    return State::STENCIL_OP_KEEP;
}

State::BlendOp blendOpFromGL(GLenum blendOp) {
    if (blendOp == GL_FUNC_ADD) {
        return State::BLEND_OP_ADD;
    } else if (blendOp == GL_FUNC_SUBTRACT) {
        return State::BLEND_OP_SUBTRACT;
    } else if (blendOp == GL_FUNC_REVERSE_SUBTRACT) {
        return State::BLEND_OP_REV_SUBTRACT;
    } else if (blendOp == GL_MIN) {
        return State::BLEND_OP_MIN;
    } else if (blendOp == GL_MAX) {
        return State::BLEND_OP_MAX;
    }

    return State::BLEND_OP_ADD;
}

State::BlendArg blendArgFromGL(GLenum blendArg) {
    if (blendArg == GL_ZERO) {
        return State::ZERO;
    } else if (blendArg == GL_ONE) {
        return State::ONE;
    } else if (blendArg == GL_SRC_COLOR) {
        return State::SRC_COLOR;
    } else if (blendArg == GL_ONE_MINUS_SRC_COLOR) {
        return State::INV_SRC_COLOR;
    } else if (blendArg == GL_DST_COLOR) {
        return State::DEST_COLOR;
    } else if (blendArg == GL_ONE_MINUS_DST_COLOR) {
        return State::INV_DEST_COLOR;
    } else if (blendArg == GL_SRC_ALPHA) {
        return State::SRC_ALPHA;
    } else if (blendArg == GL_ONE_MINUS_SRC_ALPHA) {
        return State::INV_SRC_ALPHA;
    } else if (blendArg == GL_DST_ALPHA) {
        return State::DEST_ALPHA;
    } else if (blendArg == GL_ONE_MINUS_DST_ALPHA) {
        return State::INV_DEST_ALPHA;
    } else if (blendArg == GL_CONSTANT_COLOR) {
        return State::FACTOR_COLOR;
    } else if (blendArg == GL_ONE_MINUS_CONSTANT_COLOR) {
        return State::INV_FACTOR_COLOR;
    } else if (blendArg == GL_CONSTANT_ALPHA) {
        return State::FACTOR_ALPHA;
    } else if (blendArg == GL_ONE_MINUS_CONSTANT_ALPHA) {
        return State::INV_FACTOR_ALPHA;
    }

    return State::ONE;
}
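These *FromGL helpers invert the static *_TO_GL tables that the do_setState* functions below use in the opposite direction; a spot-check sketch of the expected round trip (BLEND_ARGS_TO_GL is the table referenced by do_setStateBlend further down, while NUM_BLEND_ARGS and SRC_ALPHA_SATURATE are assumptions about the State enum, shown only to state the invariant):

    // Sketch: every reversible State::BlendArg should survive a round trip
    // through the two tables.
    for (int arg = 0; arg < State::NUM_BLEND_ARGS; ++arg) {
        if (State::BlendArg(arg) == State::SRC_ALPHA_SATURATE) {
            continue;  // GL_SRC_ALPHA_SATURATE has no inverse mapping above
        }
        assert(blendArgFromGL(BLEND_ARGS_TO_GL[arg]) == State::BlendArg(arg));
    }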

void GLBackend::getCurrentGLState(State::Data& state) {
    {
        GLint modes[2];
        glGetIntegerv(GL_POLYGON_MODE, modes);
        if (modes[0] == GL_FILL) {
            state.fillMode = State::FILL_FACE;
        } else {
            if (modes[0] == GL_LINE) {
                state.fillMode = State::FILL_LINE;
            } else {
                state.fillMode = State::FILL_POINT;
            }
        }
    }
    {
        if (glIsEnabled(GL_CULL_FACE)) {
            GLint mode;
            glGetIntegerv(GL_CULL_FACE_MODE, &mode);
            state.cullMode = (mode == GL_FRONT ? State::CULL_FRONT : State::CULL_BACK);
        } else {
            state.cullMode = State::CULL_NONE;
        }
    }
    {
        GLint winding;
        glGetIntegerv(GL_FRONT_FACE, &winding);
        state.frontFaceClockwise = (winding == GL_CW);
        state.depthClampEnable = glIsEnabled(GL_DEPTH_CLAMP);
        state.scissorEnable = glIsEnabled(GL_SCISSOR_TEST);
        state.multisampleEnable = glIsEnabled(GL_MULTISAMPLE);
        state.antialisedLineEnable = glIsEnabled(GL_LINE_SMOOTH);
    }
    {
        if (glIsEnabled(GL_POLYGON_OFFSET_FILL)) {
            glGetFloatv(GL_POLYGON_OFFSET_FACTOR, &state.depthBiasSlopeScale);
            glGetFloatv(GL_POLYGON_OFFSET_UNITS, &state.depthBias);
        }
    }
    {
        GLboolean isEnabled = glIsEnabled(GL_DEPTH_TEST);
        GLboolean writeMask;
        glGetBooleanv(GL_DEPTH_WRITEMASK, &writeMask);
        GLint func;
        glGetIntegerv(GL_DEPTH_FUNC, &func);

        state.depthTest = State::DepthTest(isEnabled, writeMask, comparisonFuncFromGL(func));
    }
    {
        GLboolean isEnabled = glIsEnabled(GL_STENCIL_TEST);

        GLint frontWriteMask;
        GLint frontReadMask;
        GLint frontRef;
        GLint frontFail;
        GLint frontDepthFail;
        GLint frontPass;
        GLint frontFunc;
        glGetIntegerv(GL_STENCIL_WRITEMASK, &frontWriteMask);
        glGetIntegerv(GL_STENCIL_VALUE_MASK, &frontReadMask);
        glGetIntegerv(GL_STENCIL_REF, &frontRef);
        glGetIntegerv(GL_STENCIL_FAIL, &frontFail);
        glGetIntegerv(GL_STENCIL_PASS_DEPTH_FAIL, &frontDepthFail);
        glGetIntegerv(GL_STENCIL_PASS_DEPTH_PASS, &frontPass);
        glGetIntegerv(GL_STENCIL_FUNC, &frontFunc);

        GLint backWriteMask;
        GLint backReadMask;
        GLint backRef;
        GLint backFail;
        GLint backDepthFail;
        GLint backPass;
        GLint backFunc;
        glGetIntegerv(GL_STENCIL_BACK_WRITEMASK, &backWriteMask);
        glGetIntegerv(GL_STENCIL_BACK_VALUE_MASK, &backReadMask);
        glGetIntegerv(GL_STENCIL_BACK_REF, &backRef);
        glGetIntegerv(GL_STENCIL_BACK_FAIL, &backFail);
        glGetIntegerv(GL_STENCIL_BACK_PASS_DEPTH_FAIL, &backDepthFail);
        glGetIntegerv(GL_STENCIL_BACK_PASS_DEPTH_PASS, &backPass);
        glGetIntegerv(GL_STENCIL_BACK_FUNC, &backFunc);

        state.stencilActivation = State::StencilActivation(isEnabled, frontWriteMask, backWriteMask);
        state.stencilTestFront = State::StencilTest(frontRef, frontReadMask, comparisonFuncFromGL(frontFunc), stencilOpFromGL(frontFail), stencilOpFromGL(frontDepthFail), stencilOpFromGL(frontPass));
        state.stencilTestBack = State::StencilTest(backRef, backReadMask, comparisonFuncFromGL(backFunc), stencilOpFromGL(backFail), stencilOpFromGL(backDepthFail), stencilOpFromGL(backPass));
    }
    {
        GLint mask = 0xFFFFFFFF;

#ifdef GPU_PROFILE_CORE
        if (glIsEnabled(GL_SAMPLE_MASK)) {
            glGetIntegerv(GL_SAMPLE_MASK, &mask);
            state.sampleMask = mask;
        }
#endif
        state.sampleMask = mask;
    }
    {
        state.alphaToCoverageEnable = glIsEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE);
    }
    {
        GLboolean isEnabled = glIsEnabled(GL_BLEND);
        GLint srcRGB;
        GLint srcA;
        GLint dstRGB;
        GLint dstA;
        glGetIntegerv(GL_BLEND_SRC_RGB, &srcRGB);
        glGetIntegerv(GL_BLEND_SRC_ALPHA, &srcA);
        glGetIntegerv(GL_BLEND_DST_RGB, &dstRGB);
        glGetIntegerv(GL_BLEND_DST_ALPHA, &dstA);

        GLint opRGB;
        GLint opA;
        glGetIntegerv(GL_BLEND_EQUATION_RGB, &opRGB);
        glGetIntegerv(GL_BLEND_EQUATION_ALPHA, &opA);

        state.blendFunction = State::BlendFunction(isEnabled,
            blendArgFromGL(srcRGB), blendOpFromGL(opRGB), blendArgFromGL(dstRGB),
            blendArgFromGL(srcA), blendOpFromGL(opA), blendArgFromGL(dstA));
    }
    {
        GLboolean mask[4];
        glGetBooleanv(GL_COLOR_WRITEMASK, mask);
        state.colorWriteMask = (mask[0] ? State::WRITE_RED : 0)
            | (mask[1] ? State::WRITE_GREEN : 0)
            | (mask[2] ? State::WRITE_BLUE : 0)
            | (mask[3] ? State::WRITE_ALPHA : 0);
    }

    (void) CHECK_GL_ERROR();
}
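syncPipelineStateCache() below seeds the backend's cache from the live context using the reader above; a condensed sketch of that flow (evalSignature is an assumption about the State API, included only to make the data flow explicit):

    // Sketch: read the live GL state once, then treat the result as both the
    // cached state and the cached signature for later resetPipelineState calls.
    State::Data state;
    getCurrentGLState(state);  // issues the glGet* queries shown above
    State::Signature signature = State::evalSignature(state);  // assumed helper
    _pipeline._stateCache = state;
    _pipeline._stateSignatureCache = signature;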

void GLBackend::syncPipelineStateCache() {
    State::Data state;

@ -500,21 +49,12 @@ void GLBackend::syncPipelineStateCache() {
    _pipeline._stateSignatureCache = signature;
}

static GLenum GL_COMPARISON_FUNCTIONS[] = {
    GL_NEVER,
    GL_LESS,
    GL_EQUAL,
    GL_LEQUAL,
    GL_GREATER,
    GL_NOTEQUAL,
    GL_GEQUAL,
    GL_ALWAYS };

void GLBackend::do_setStateFillMode(int32 mode) {
    if (_pipeline._stateCache.fillMode != mode) {
        static GLenum GL_FILL_MODES[] = { GL_POINT, GL_LINE, GL_FILL };
        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL_MODES[mode]);
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.fillMode = State::FillMode(mode);
    }

@ -530,7 +70,7 @@ void GLBackend::do_setStateCullMode(int32 mode) {
            glEnable(GL_CULL_FACE);
            glCullFace(GL_CULL_MODES[mode]);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.cullMode = State::CullMode(mode);
    }

@ -540,8 +80,8 @@ void GLBackend::do_setStateFrontFaceClockwise(bool isClockwise) {
    if (_pipeline._stateCache.frontFaceClockwise != isClockwise) {
        static GLenum GL_FRONT_FACES[] = { GL_CCW, GL_CW };
        glFrontFace(GL_FRONT_FACES[isClockwise]);
        (void) CHECK_GL_ERROR();

        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.frontFaceClockwise = isClockwise;
    }
}

@ -553,7 +93,7 @@ void GLBackend::do_setStateDepthClampEnable(bool enable) {
        } else {
            glDisable(GL_DEPTH_CLAMP);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.depthClampEnable = enable;
    }

@ -566,7 +106,7 @@ void GLBackend::do_setStateScissorEnable(bool enable) {
        } else {
            glDisable(GL_SCISSOR_TEST);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.scissorEnable = enable;
    }

@ -579,7 +119,7 @@ void GLBackend::do_setStateMultisampleEnable(bool enable) {
        } else {
            glDisable(GL_MULTISAMPLE);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.multisampleEnable = enable;
    }

@ -592,14 +132,14 @@ void GLBackend::do_setStateAntialiasedLineEnable(bool enable) {
        } else {
            glDisable(GL_LINE_SMOOTH);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.antialisedLineEnable = enable;
    }
}

void GLBackend::do_setStateDepthBias(Vec2 bias) {
    if ( (bias.x != _pipeline._stateCache.depthBias) || (bias.y != _pipeline._stateCache.depthBiasSlopeScale)) {
    if ((bias.x != _pipeline._stateCache.depthBias) || (bias.y != _pipeline._stateCache.depthBiasSlopeScale)) {
        if ((bias.x != 0.0f) || (bias.y != 0.0f)) {
            glEnable(GL_POLYGON_OFFSET_FILL);
            glEnable(GL_POLYGON_OFFSET_LINE);

@ -610,10 +150,10 @@ void GLBackend::do_setStateDepthBias(Vec2 bias) {
            glDisable(GL_POLYGON_OFFSET_LINE);
            glDisable(GL_POLYGON_OFFSET_POINT);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

            _pipeline._stateCache.depthBias = bias.x;
            _pipeline._stateCache.depthBiasSlopeScale = bias.y;
        _pipeline._stateCache.depthBias = bias.x;
        _pipeline._stateCache.depthBiasSlopeScale = bias.y;
    }
}

@ -622,15 +162,15 @@ void GLBackend::do_setStateDepthTest(State::DepthTest test) {
        if (test.isEnabled()) {
            glEnable(GL_DEPTH_TEST);
            glDepthMask(test.getWriteMask());
            glDepthFunc(GL_COMPARISON_FUNCTIONS[test.getFunction()]);
            glDepthFunc(COMPARISON_TO_GL[test.getFunction()]);
        } else {
            glDisable(GL_DEPTH_TEST);
        }
        if (CHECK_GL_ERROR()) {
            qDebug() << "DepthTest" << (test.isEnabled() ? "Enabled" : "Disabled")
            << "Mask=" << (test.getWriteMask() ? "Write" : "no Write")
            << "Func=" << test.getFunction()
            << "Raw=" << test.getRaw();
                << "Mask=" << (test.getWriteMask() ? "Write" : "no Write")
                << "Func=" << test.getFunction()
                << "Raw=" << test.getRaw();
        }

        _pipeline._stateCache.depthTest = test;

@ -638,7 +178,7 @@
}

void GLBackend::do_setStateStencil(State::StencilActivation activation, State::StencilTest frontTest, State::StencilTest backTest) {

    if ((_pipeline._stateCache.stencilActivation != activation)
        || (_pipeline._stateCache.stencilTestFront != frontTest)
        || (_pipeline._stateCache.stencilTestBack != backTest)) {

@ -665,18 +205,18 @@ void GLBackend::do_setStateStencil(State::StencilActivation activation, State::S

            if (frontTest != backTest) {
                glStencilOpSeparate(GL_FRONT, STENCIL_OPS[frontTest.getFailOp()], STENCIL_OPS[frontTest.getPassOp()], STENCIL_OPS[frontTest.getDepthFailOp()]);
                glStencilFuncSeparate(GL_FRONT, GL_COMPARISON_FUNCTIONS[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());
                glStencilFuncSeparate(GL_FRONT, COMPARISON_TO_GL[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());

                glStencilOpSeparate(GL_BACK, STENCIL_OPS[backTest.getFailOp()], STENCIL_OPS[backTest.getPassOp()], STENCIL_OPS[backTest.getDepthFailOp()]);
                glStencilFuncSeparate(GL_BACK, GL_COMPARISON_FUNCTIONS[backTest.getFunction()], backTest.getReference(), backTest.getReadMask());
                glStencilFuncSeparate(GL_BACK, COMPARISON_TO_GL[backTest.getFunction()], backTest.getReference(), backTest.getReadMask());
            } else {
                glStencilOp(STENCIL_OPS[frontTest.getFailOp()], STENCIL_OPS[frontTest.getPassOp()], STENCIL_OPS[frontTest.getDepthFailOp()]);
                glStencilFunc(GL_COMPARISON_FUNCTIONS[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());
                glStencilFunc(COMPARISON_TO_GL[frontTest.getFunction()], frontTest.getReference(), frontTest.getReadMask());
            }
        } else {
            glDisable(GL_STENCIL_TEST);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.stencilActivation = activation;
        _pipeline._stateCache.stencilTestFront = frontTest;

@ -691,7 +231,7 @@ void GLBackend::do_setStateAlphaToCoverageEnable(bool enable) {
        } else {
            glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
        }
        (void) CHECK_GL_ERROR();
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.alphaToCoverageEnable = enable;
    }

@ -699,15 +239,13 @@

void GLBackend::do_setStateSampleMask(uint32 mask) {
    if (_pipeline._stateCache.sampleMask != mask) {
#ifdef GPU_CORE_PROFILE
        if (mask == 0xFFFFFFFF) {
            glDisable(GL_SAMPLE_MASK);
        } else {
            glEnable(GL_SAMPLE_MASK);
            glSampleMaski(0, mask);
        }
        (void) CHECK_GL_ERROR();
#endif
        (void)CHECK_GL_ERROR();
        _pipeline._stateCache.sampleMask = mask;
    }
}

@ -717,40 +255,16 @@ void GLBackend::do_setStateBlend(State::BlendFunction function) {
    if (function.isEnabled()) {
        glEnable(GL_BLEND);

        static GLenum GL_BLEND_OPS[] = {
            GL_FUNC_ADD,
            GL_FUNC_SUBTRACT,
            GL_FUNC_REVERSE_SUBTRACT,
            GL_MIN,
            GL_MAX };
        glBlendEquationSeparate(BLEND_OPS_TO_GL[function.getOperationColor()], BLEND_OPS_TO_GL[function.getOperationAlpha()]);
        (void)CHECK_GL_ERROR();

        glBlendEquationSeparate(GL_BLEND_OPS[function.getOperationColor()], GL_BLEND_OPS[function.getOperationAlpha()]);
        (void) CHECK_GL_ERROR();

        static GLenum BLEND_ARGS[] = {
            GL_ZERO,
            GL_ONE,
            GL_SRC_COLOR,
            GL_ONE_MINUS_SRC_COLOR,
            GL_SRC_ALPHA,
            GL_ONE_MINUS_SRC_ALPHA,
            GL_DST_ALPHA,
            GL_ONE_MINUS_DST_ALPHA,
            GL_DST_COLOR,
            GL_ONE_MINUS_DST_COLOR,
            GL_SRC_ALPHA_SATURATE,
            GL_CONSTANT_COLOR,
            GL_ONE_MINUS_CONSTANT_COLOR,
            GL_CONSTANT_ALPHA,
            GL_ONE_MINUS_CONSTANT_ALPHA,
        };

        glBlendFuncSeparate(BLEND_ARGS[function.getSourceColor()], BLEND_ARGS[function.getDestinationColor()],
            BLEND_ARGS[function.getSourceAlpha()], BLEND_ARGS[function.getDestinationAlpha()]);
        glBlendFuncSeparate(BLEND_ARGS_TO_GL[function.getSourceColor()], BLEND_ARGS_TO_GL[function.getDestinationColor()],
            BLEND_ARGS_TO_GL[function.getSourceAlpha()], BLEND_ARGS_TO_GL[function.getDestinationAlpha()]);
    } else {
        glDisable(GL_BLEND);
    }
    (void) CHECK_GL_ERROR();
    (void)CHECK_GL_ERROR();

    _pipeline._stateCache.blendFunction = function;
}

@ -759,10 +273,10 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {
    if (_pipeline._stateCache.colorWriteMask != mask) {
        glColorMask(mask & State::ColorMask::WRITE_RED,
        mask & State::ColorMask::WRITE_GREEN,
        mask & State::ColorMask::WRITE_BLUE,
        mask & State::ColorMask::WRITE_ALPHA );
        (void) CHECK_GL_ERROR();
            mask & State::ColorMask::WRITE_GREEN,
            mask & State::ColorMask::WRITE_BLUE,
            mask & State::ColorMask::WRITE_ALPHA);
        (void)CHECK_GL_ERROR();

        _pipeline._stateCache.colorWriteMask = mask;
    }

@ -771,12 +285,12 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {

void GLBackend::do_setStateBlendFactor(Batch& batch, size_t paramOffset) {
    Vec4 factor(batch._params[paramOffset + 0]._float,
    batch._params[paramOffset + 1]._float,
    batch._params[paramOffset + 2]._float,
    batch._params[paramOffset + 3]._float);
        batch._params[paramOffset + 1]._float,
        batch._params[paramOffset + 2]._float,
        batch._params[paramOffset + 3]._float);

    glBlendColor(factor.x, factor.y, factor.z, factor.w);
    (void) CHECK_GL_ERROR();
    (void)CHECK_GL_ERROR();
}

void GLBackend::do_setStateScissorRect(Batch& batch, size_t paramOffset) {

@ -790,5 +304,6 @@ void GLBackend::do_setStateScissorRect(Batch& batch, size_t paramOffset) {
        }
    }
    glScissor(rect.x, rect.y, rect.z, rect.w);
    (void) CHECK_GL_ERROR();
    (void)CHECK_GL_ERROR();
}

@ -9,591 +9,11 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

#include <unordered_set>
#include <unordered_map>
#include <QtCore/QThread>

#include "GLBackendShared.h"

#include "GLBackendTextureTransfer.h"
#include "GLTexture.h"

using namespace gpu;
using namespace gpu::gl;

GLenum gpuToGLTextureType(const Texture& texture) {
    switch (texture.getType()) {
        case Texture::TEX_2D:
            return GL_TEXTURE_2D;
            break;

        case Texture::TEX_CUBE:
            return GL_TEXTURE_CUBE_MAP;
            break;

        default:
            qFatal("Unsupported texture type");
    }
    Q_UNREACHABLE();
    return GL_TEXTURE_2D;
}

GLuint allocateSingleTexture() {
    Backend::incrementTextureGPUCount();
    GLuint result;
    glGenTextures(1, &result);
    return result;
}

// FIXME placeholder for texture memory over-use
#define DEFAULT_MAX_MEMORY_MB 256

float GLBackend::GLTexture::getMemoryPressure() {
    // Check for an explicit memory limit
    auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();

    // If no memory limit has been set, use a percentage of the total dedicated memory
    if (!availableTextureMemory) {
        auto totalGpuMemory = GLBackend::getDedicatedMemory();

        // If no limit has been explicitly set, and the dedicated memory can't be determined,
        // just use a fallback fixed value of 256 MB
        if (!totalGpuMemory) {
            totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
        }

        // Allow 75% of all available GPU memory to be consumed by textures
        // FIXME overly conservative?
        availableTextureMemory = (totalGpuMemory >> 2) * 3;
    }

    // Return the consumed texture memory divided by the available texture memory.
    auto consumedGpuMemory = Context::getTextureGPUMemoryUsage();
    return (float)consumedGpuMemory / (float)availableTextureMemory;
}
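A quick worked example of the pressure value; the sizes are made up for illustration:

    // totalGpuMemory         = 1024 MB (reported dedicated memory, no explicit limit)
    // availableTextureMemory = (1024 >> 2) * 3 = 768 MB   (the 75% budget)
    // consumedGpuMemory      = 384 MB of resident textures
    // getMemoryPressure()    = 384 / 768 = 0.5            (values > 1.0 mean over budget)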
||||
GLBackend::GLTexture::DownsampleSource::DownsampleSource(GLTexture& oldTexture) :
|
||||
_texture(oldTexture._privateTexture),
|
||||
_minMip(oldTexture._minMip),
|
||||
_maxMip(oldTexture._maxMip)
|
||||
{
|
||||
// Take ownership of the GL texture
|
||||
oldTexture._texture = oldTexture._privateTexture = 0;
|
||||
}
|
||||
|
||||
GLBackend::GLTexture::DownsampleSource::~DownsampleSource() {
|
||||
if (_texture) {
|
||||
Backend::decrementTextureGPUCount();
|
||||
glDeleteTextures(1, &_texture);
|
||||
}
|
||||
}
|
||||
|
||||
const GLenum GLBackend::GLTexture::CUBE_FACE_LAYOUT[6] = {
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
|
||||
GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
|
||||
};
|
||||
|
||||
static std::map<uint16, size_t> _textureCountByMips;
|
||||
static uint16 _currentMaxMipCount { 0 };
|
||||
|
||||
GLBackend::GLTexture::GLTexture(bool transferrable, const Texture& texture, bool init) :
    _storageStamp(texture.getStamp()),
    _target(gpuToGLTextureType(texture)),
    _maxMip(texture.maxMip()),
    _minMip(texture.minMip()),
    _transferrable(transferrable),
    _virtualSize(texture.evalTotalSize()),
    _size(_virtualSize),
    _gpuTexture(texture)
{
    Q_UNUSED(init);

    if (_transferrable) {
        uint16 mipCount = usedMipLevels();
        _currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
        if (!_textureCountByMips.count(mipCount)) {
            _textureCountByMips[mipCount] = 1;
        } else {
            ++_textureCountByMips[mipCount];
        }
    } else {
        withPreservedTexture([&] {
            createTexture();
        });
        _contentStamp = _gpuTexture.getDataStamp();
        postTransfer();
    }

    Backend::updateTextureGPUMemoryUsage(0, _size);
    Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}

// Create the texture and allocate storage
GLBackend::GLTexture::GLTexture(bool transferrable, const Texture& texture) :
    GLTexture(transferrable, texture, true)
{
    Backend::setGPUObject(texture, this);
}

// Create the texture and copy from the original higher resolution version
GLBackend::GLTexture::GLTexture(GLTexture& originalTexture, const gpu::Texture& texture) :
    GLTexture(originalTexture._transferrable, texture, true)
{
    if (!originalTexture._texture) {
        qFatal("Invalid original texture");
    }
    Q_ASSERT(_minMip >= originalTexture._minMip);
    // Our downsampler takes ownership of the texture
    _downsampleSource = std::make_shared<DownsampleSource>(originalTexture);
    _texture = _downsampleSource->_texture;

    // Set the GPU object last because that implicitly destroys the originalTexture object
    Backend::setGPUObject(texture, this);
}

GLBackend::GLTexture::~GLTexture() {
    if (_privateTexture != 0) {
        Backend::decrementTextureGPUCount();
        glDeleteTextures(1, &_privateTexture);
    }

    if (_transferrable) {
        uint16 mipCount = usedMipLevels();
        Q_ASSERT(_textureCountByMips.count(mipCount));
        auto& numTexturesForMipCount = _textureCountByMips[mipCount];
        --numTexturesForMipCount;
        if (0 == numTexturesForMipCount) {
            _textureCountByMips.erase(mipCount);
            if (mipCount == _currentMaxMipCount) {
                // Guard against an empty map: fall back to 0 when this was the last texture
                _currentMaxMipCount = _textureCountByMips.empty() ? 0 : _textureCountByMips.rbegin()->first;
            }
        }
    }

    Backend::updateTextureGPUMemoryUsage(_size, 0);
    Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}
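The constructor and destructor above keep a histogram of live transferrable textures keyed by mip count so that `_currentMaxMipCount` always reflects the largest mip chain still alive. A compilable sketch of just that bookkeeping, with simplified types:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>

// Histogram of live textures keyed by their mip count, plus the current maximum.
static std::map<uint16_t, size_t> textureCountByMips;
static uint16_t currentMaxMipCount = 0;

void onTextureCreated(uint16_t mipCount) {
    currentMaxMipCount = std::max(currentMaxMipCount, mipCount);
    ++textureCountByMips[mipCount]; // operator[] value-initializes to 0
}

void onTextureDestroyed(uint16_t mipCount) {
    auto itr = textureCountByMips.find(mipCount);
    assert(itr != textureCountByMips.end());
    if (--itr->second == 0) {
        textureCountByMips.erase(itr);
        if (mipCount == currentMaxMipCount) {
            // std::map is ordered, so rbegin() is the largest remaining key
            currentMaxMipCount = textureCountByMips.empty() ? 0 : textureCountByMips.rbegin()->first;
        }
    }
}
```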
const std::vector<GLenum>& GLBackend::GLTexture::getFaceTargets() const {
    static std::vector<GLenum> cubeFaceTargets {
        GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
        GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
        GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
    };
    static std::vector<GLenum> faceTargets {
        GL_TEXTURE_2D
    };
    switch (_target) {
    case GL_TEXTURE_2D:
        return faceTargets;
    case GL_TEXTURE_CUBE_MAP:
        return cubeFaceTargets;
    default:
        Q_UNREACHABLE();
        break;
    }
    Q_UNREACHABLE();
    return faceTargets;
}
void GLBackend::GLTexture::withPreservedTexture(std::function<void()> f) {
    GLint boundTex = -1;
    switch (_target) {
    case GL_TEXTURE_2D:
        glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
        break;

    case GL_TEXTURE_CUBE_MAP:
        glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
        break;

    default:
        qFatal("Unsupported texture type");
    }
    (void)CHECK_GL_ERROR();

    f();

    glBindTexture(_target, boundTex);
    (void)CHECK_GL_ERROR();
}
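`withPreservedTexture` is the classic save/mutate/restore pattern for GL bind points. A minimal RAII variant of the same idea, assuming only core GL and GLEW (not part of this codebase):

```cpp
#include <GL/glew.h>

// RAII guard that restores the previous GL_TEXTURE_2D binding on scope exit.
// A sketch of the same preservation idea as withPreservedTexture() above.
class PreserveTexture2D {
public:
    PreserveTexture2D() { glGetIntegerv(GL_TEXTURE_BINDING_2D, &_previous); }
    ~PreserveTexture2D() { glBindTexture(GL_TEXTURE_2D, (GLuint)_previous); }
private:
    GLint _previous { 0 };
};

void uploadSomething(GLuint texture) {
    PreserveTexture2D guard; // previous binding restored when 'guard' is destroyed
    glBindTexture(GL_TEXTURE_2D, texture);
    // ... glTexSubImage2D / glTexParameteri calls here ...
}
```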
void GLBackend::GLTexture::createTexture() {
    _privateTexture = allocateSingleTexture();

    glBindTexture(_target, _privateTexture);
    (void)CHECK_GL_ERROR();

    allocateStorage();
    (void)CHECK_GL_ERROR();

    syncSampler(_gpuTexture.getSampler(), _gpuTexture.getType(), this);
    (void)CHECK_GL_ERROR();
}
void GLBackend::GLTexture::allocateStorage() {
    GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat());
    glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
    (void)CHECK_GL_ERROR();
    glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
    (void)CHECK_GL_ERROR();
    if (GLEW_VERSION_4_2 && !_gpuTexture.getTexelFormat().isCompressed()) {
        // Get the dimensions, accounting for the downgrade level
        Vec3u dimensions = _gpuTexture.evalMipDimensions(_minMip);
        glTexStorage2D(_target, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
        (void)CHECK_GL_ERROR();
    } else {
        for (uint16_t l = _minMip; l <= _maxMip; l++) {
            // Get the mip level dimensions, accounting for the downgrade level
            Vec3u dimensions = _gpuTexture.evalMipDimensions(l);
            for (GLenum target : getFaceTargets()) {
                glTexImage2D(target, l - _minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
                (void)CHECK_GL_ERROR();
            }
        }
    }
}
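The branch above prefers immutable storage (`glTexStorage2D`, GL 4.2+) and falls back to per-level `glTexImage2D` allocation otherwise. A standalone sketch of the two allocation paths for a simple RGBA8 2D texture, assuming a current context and GLEW:

```cpp
#include <GL/glew.h>
#include <algorithm>

// Allocate a full mip chain for a width x height RGBA8 texture, using
// immutable storage when available, otherwise the mutable per-level path.
void allocateMipChain(GLuint texture, GLsizei width, GLsizei height) {
    GLsizei levels = 1;
    for (GLsizei s = std::max(width, height); s > 1; s >>= 1) {
        ++levels; // floor(log2(max(w,h))) + 1 mip levels
    }
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, levels - 1);
    if (GLEW_VERSION_4_2) {
        // One call; storage becomes immutable (size/format can never change)
        glTexStorage2D(GL_TEXTURE_2D, levels, GL_RGBA8, width, height);
    } else {
        // Mutable path: allocate every level explicitly, no pixel data yet
        for (GLsizei level = 0; level < levels; ++level) {
            GLsizei w = std::max(1, width >> level);
            GLsizei h = std::max(1, height >> level);
            glTexImage2D(GL_TEXTURE_2D, level, GL_RGBA8, w, h, 0,
                         GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
        }
    }
}
```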
void GLBackend::GLTexture::setSize(GLuint size) {
    Backend::updateTextureGPUMemoryUsage(_size, size);
    _size = size;
}

void GLBackend::GLTexture::updateSize() {
    setSize(_virtualSize);
    if (!_texture) {
        return;
    }

    if (_gpuTexture.getTexelFormat().isCompressed()) {
        GLenum proxyType = GL_TEXTURE_2D;
        GLuint numFaces = 1;
        if (_gpuTexture.getType() == gpu::Texture::TEX_CUBE) {
            proxyType = CUBE_FACE_LAYOUT[0];
            numFaces = CUBE_NUM_FACES;
        }
        GLint gpuSize{ 0 };
        glGetTexLevelParameteriv(proxyType, 0, GL_TEXTURE_COMPRESSED, &gpuSize);
        (void)CHECK_GL_ERROR();

        if (gpuSize) {
            for (GLuint level = _minMip; level < _maxMip; level++) {
                GLint levelSize{ 0 };
                glGetTexLevelParameteriv(proxyType, level, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &levelSize);
                levelSize *= numFaces;

                if (levelSize <= 0) {
                    break;
                }
                gpuSize += levelSize;
            }
            (void)CHECK_GL_ERROR();
            setSize(gpuSize);
            return;
        }
    }
}
bool GLBackend::GLTexture::isInvalid() const {
    return _storageStamp < _gpuTexture.getStamp();
}

bool GLBackend::GLTexture::isOutdated() const {
    return GLTexture::Idle == _syncState && _contentStamp < _gpuTexture.getDataStamp();
}

bool GLBackend::GLTexture::isOverMaxMemory() const {
    // FIXME switch to using the max mip count used from the previous frame
    if (usedMipLevels() < _currentMaxMipCount) {
        return false;
    }
    Q_ASSERT(usedMipLevels() == _currentMaxMipCount);

    if (getMemoryPressure() < 1.0f) {
        return false;
    }

    return true;
}
bool GLBackend::GLTexture::isReady() const {
    // If we have an invalid texture, we're never ready
    if (isInvalid()) {
        return false;
    }

    // If we're out of date, but the transfer is in progress, report ready
    // as a special case
    auto syncState = _syncState.load();

    if (isOutdated()) {
        return Idle != syncState;
    }

    if (Idle != syncState) {
        return false;
    }

    return true;
}
// Move content bits from the CPU to the GPU for a given mip / face
void GLBackend::GLTexture::transferMip(uint16_t mipLevel, uint8_t face) const {
    auto mip = _gpuTexture.accessStoredMipFace(mipLevel, face);
    GLTexelFormat texelFormat = GLTexelFormat::evalGLTexelFormat(_gpuTexture.getTexelFormat(), mip->getFormat());
    //GLenum target = getFaceTargets()[face];
    GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
    auto size = _gpuTexture.evalMipDimensions(mipLevel);
    glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
    (void)CHECK_GL_ERROR();
}
// This should never happen on the main thread
// Move content bits from the CPU to the GPU
void GLBackend::GLTexture::transfer() const {
    PROFILE_RANGE(__FUNCTION__);
    //qDebug() << "Transferring texture: " << _privateTexture;
    // Need to update the content of the GPU object from the source sysmem of the texture
    if (_contentStamp >= _gpuTexture.getDataStamp()) {
        return;
    }

    glBindTexture(_target, _privateTexture);
    (void)CHECK_GL_ERROR();

    if (_downsampleSource) {
        GLuint fbo { 0 };
        glGenFramebuffers(1, &fbo);
        (void)CHECK_GL_ERROR();
        glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
        (void)CHECK_GL_ERROR();
        // Find the distance between the old min mip and the new one
        uint16 mipOffset = _minMip - _downsampleSource->_minMip;
        for (uint16 i = _minMip; i <= _maxMip; ++i) {
            uint16 targetMip = i - _minMip;
            uint16 sourceMip = targetMip + mipOffset;
            Vec3u dimensions = _gpuTexture.evalMipDimensions(i);
            for (GLenum target : getFaceTargets()) {
                glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, _downsampleSource->_texture, sourceMip);
                (void)CHECK_GL_ERROR();
                glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, dimensions.x, dimensions.y);
                (void)CHECK_GL_ERROR();
            }
        }
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        glDeleteFramebuffers(1, &fbo);
    } else {
        // Go through the process of allocating the correct storage and/or updating the content
        switch (_gpuTexture.getType()) {
        case Texture::TEX_2D:
            {
                for (uint16_t i = _minMip; i <= _maxMip; ++i) {
                    if (_gpuTexture.isStoredMipFaceAvailable(i)) {
                        transferMip(i);
                    }
                }
            }
            break;

        case Texture::TEX_CUBE:
            // transfer pixels from each face
            for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
                for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
                    if (_gpuTexture.isStoredMipFaceAvailable(i, f)) {
                        transferMip(i, f);
                    }
                }
            }
            break;

        default:
            qCWarning(gpugllogging) << __FUNCTION__ << " case for Texture Type " << _gpuTexture.getType() << " not supported";
            break;
        }
    }
    if (_gpuTexture.isAutogenerateMips()) {
        glGenerateMipmap(_target);
        (void)CHECK_GL_ERROR();
    }
}
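The downsample branch above copies mips GPU-to-GPU by attaching each source mip to a scratch read framebuffer and calling `glCopyTexSubImage2D` into the destination. A standalone sketch of that copy loop for a plain 2D texture; the destination texture is assumed to be bound, and all names are illustrative:

```cpp
#include <GL/glew.h>
#include <algorithm>

// Copy 'levels' mips from srcTexture (starting at srcBaseMip) into the
// currently bound destination 2D texture, level 0 upward. Mirrors the
// framebuffer-based copy in GLTexture::transfer() above.
void copyMipChain(GLuint srcTexture, GLsizei srcBaseMip, GLsizei levels,
                  GLsizei dstWidth, GLsizei dstHeight) {
    GLuint fbo = 0;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
    for (GLsizei level = 0; level < levels; ++level) {
        // Read from the matching source mip...
        glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                               GL_TEXTURE_2D, srcTexture, srcBaseMip + level);
        // ...into the bound destination texture's mip 'level'
        GLsizei w = std::max(1, dstWidth >> level);
        GLsizei h = std::max(1, dstHeight >> level);
        glCopyTexSubImage2D(GL_TEXTURE_2D, level, 0, 0, 0, 0, w, h);
    }
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    glDeleteFramebuffers(1, &fbo);
}
```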
// Do any post-transfer operations that might be required on the main context / rendering thread
void GLBackend::GLTexture::postTransfer() {
    setSyncState(GLTexture::Idle);

    // The public gltexture becomes available
    _texture = _privateTexture;

    _downsampleSource.reset();

    // At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
    switch (_gpuTexture.getType()) {
    case Texture::TEX_2D:
        for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
            if (_gpuTexture.isStoredMipFaceAvailable(i)) {
                _gpuTexture.notifyMipFaceGPULoaded(i);
            }
        }
        break;

    case Texture::TEX_CUBE:
        // notify for each face
        for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
            for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
                if (_gpuTexture.isStoredMipFaceAvailable(i, f)) {
                    _gpuTexture.notifyMipFaceGPULoaded(i, f);
                }
            }
        }
        break;

    default:
        qCWarning(gpugllogging) << __FUNCTION__ << " case for Texture Type " << _gpuTexture.getType() << " not supported";
        break;
    }
}
GLBackend::GLTexture* GLBackend::syncGPUObject(const TexturePointer& texturePointer, bool needTransfer) {
    const Texture& texture = *texturePointer;
    if (!texture.isDefined()) {
        // NO texture definition yet so let's avoid thinking
        return nullptr;
    }

    // If the object hasn't been created, or the object definition is out of date, drop and re-create
    GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(texture);

    // Create the texture if need be (force re-creation if the storage stamp changes
    // for easier use of immutable storage)
    if (!object || object->isInvalid()) {
        // This automatically destroys any previous texture
        object = new GLTexture(needTransfer, texture);
    }

    // The object may not need to be transferred after creation
    if (!object->_transferrable) {
        return object;
    }

    // If we just did a transfer, return the object after doing post-transfer work
    if (GLTexture::Transferred == object->getSyncState()) {
        object->postTransfer();
        return object;
    }

    if (object->isReady()) {
        // Do we need to reduce texture memory usage?
        if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
            // This automatically destroys the old texture
            object = new GLTexture(*object, texture);
            _textureTransferHelper->transferTexture(texturePointer);
        }
    } else if (object->isOutdated()) {
        // Object might be outdated; if so, start the transfer
        // (outdated objects that are already in transfer will have reported 'true' for ready())
        _textureTransferHelper->transferTexture(texturePointer);
    }

    return object;
}
std::shared_ptr<GLTextureTransferHelper> GLBackend::_textureTransferHelper;

void GLBackend::initTextureTransferHelper() {
    _textureTransferHelper = std::make_shared<GLTextureTransferHelper>();
}
GLuint GLBackend::getTextureID(const TexturePointer& texture, bool sync) {
    if (!texture) {
        return 0;
    }
    GLTexture* object { nullptr };
    if (sync) {
        object = GLBackend::syncGPUObject(texture);
    } else {
        object = Backend::getGPUObject<GLBackend::GLTexture>(*texture);
    }
    if (object) {
        if (object->getSyncState() == GLTexture::Idle) {
            return object->_texture;
        } else if (object->_downsampleSource) {
            return object->_downsampleSource->_texture;
        } else {
            return 0;
        }
    } else {
        return 0;
    }
}
void GLBackend::syncSampler(const Sampler& sampler, Texture::Type type, const GLTexture* object) {
    if (!object) return;

    class GLFilterMode {
    public:
        GLint minFilter;
        GLint magFilter;
    };
    static const GLFilterMode filterModes[Sampler::NUM_FILTERS] = {
        { GL_NEAREST, GL_NEAREST },                 //FILTER_MIN_MAG_POINT,
        { GL_NEAREST, GL_LINEAR },                  //FILTER_MIN_POINT_MAG_LINEAR,
        { GL_LINEAR, GL_NEAREST },                  //FILTER_MIN_LINEAR_MAG_POINT,
        { GL_LINEAR, GL_LINEAR },                   //FILTER_MIN_MAG_LINEAR,

        { GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST },  //FILTER_MIN_MAG_MIP_POINT,
        { GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST },   //FILTER_MIN_MAG_POINT_MIP_LINEAR,
        { GL_NEAREST_MIPMAP_NEAREST, GL_LINEAR },   //FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT,
        { GL_NEAREST_MIPMAP_LINEAR, GL_LINEAR },    //FILTER_MIN_POINT_MAG_MIP_LINEAR,
        { GL_LINEAR_MIPMAP_NEAREST, GL_NEAREST },   //FILTER_MIN_LINEAR_MAG_MIP_POINT,
        { GL_LINEAR_MIPMAP_LINEAR, GL_NEAREST },    //FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR,
        { GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR },    //FILTER_MIN_MAG_LINEAR_MIP_POINT,
        { GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR },     //FILTER_MIN_MAG_MIP_LINEAR,
        { GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR }      //FILTER_ANISOTROPIC,
    };

    auto fm = filterModes[sampler.getFilter()];
    glTexParameteri(object->_target, GL_TEXTURE_MIN_FILTER, fm.minFilter);
    glTexParameteri(object->_target, GL_TEXTURE_MAG_FILTER, fm.magFilter);

    static const GLenum comparisonFuncs[NUM_COMPARISON_FUNCS] = {
        GL_NEVER,
        GL_LESS,
        GL_EQUAL,
        GL_LEQUAL,
        GL_GREATER,
        GL_NOTEQUAL,
        GL_GEQUAL,
        GL_ALWAYS };

    if (sampler.doComparison()) {
        glTexParameteri(object->_target, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
        glTexParameteri(object->_target, GL_TEXTURE_COMPARE_FUNC, comparisonFuncs[sampler.getComparisonFunction()]);
    } else {
        glTexParameteri(object->_target, GL_TEXTURE_COMPARE_MODE, GL_NONE);
    }

    static const GLenum wrapModes[Sampler::NUM_WRAP_MODES] = {
        GL_REPEAT,                      // WRAP_REPEAT,
        GL_MIRRORED_REPEAT,             // WRAP_MIRROR,
        GL_CLAMP_TO_EDGE,               // WRAP_CLAMP,
        GL_CLAMP_TO_BORDER,             // WRAP_BORDER,
        GL_MIRROR_CLAMP_TO_EDGE_EXT };  // WRAP_MIRROR_ONCE,

    glTexParameteri(object->_target, GL_TEXTURE_WRAP_S, wrapModes[sampler.getWrapModeU()]);
    glTexParameteri(object->_target, GL_TEXTURE_WRAP_T, wrapModes[sampler.getWrapModeV()]);
    glTexParameteri(object->_target, GL_TEXTURE_WRAP_R, wrapModes[sampler.getWrapModeW()]);

    glTexParameterfv(object->_target, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
    glTexParameteri(object->_target, GL_TEXTURE_BASE_LEVEL, (uint16)sampler.getMipOffset());
    glTexParameterf(object->_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
    glTexParameterf(object->_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
    glTexParameterf(object->_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
}
void GLBackend::do_generateTextureMips(Batch& batch, size_t paramOffset) {
    TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
    if (!resourceTexture) {

@ -601,28 +21,10 @@ void GLBackend::do_generateTextureMips(Batch& batch, size_t paramOffset) {
    }

    // Do not transfer the texture: this call is expected for rendering textures
    GLTexture* object = GLBackend::syncGPUObject(resourceTexture, false);
    GLTexture* object = syncGPUObject(resourceTexture, false);
    if (!object) {
        return;
    }

    // In 4.1 we still need to find an available slot
    auto freeSlot = _resource.findEmptyTextureSlot();
    auto bindingSlot = (freeSlot < 0 ? 0 : freeSlot);
    glActiveTexture(GL_TEXTURE0 + bindingSlot);
    glBindTexture(object->_target, object->_texture);

    glGenerateMipmap(object->_target);

    if (freeSlot < 0) {
        // If we had to use slot 0, restore its state
        GLTexture* boundObject = GLBackend::syncGPUObject(_resource._textures[0]);
        if (boundObject) {
            glBindTexture(boundObject->_target, boundObject->_texture);
        }
    } else {
        // clean up
        glBindTexture(object->_target, 0);
    }
    (void)CHECK_GL_ERROR();
    object->generateMips();
}
@ -9,7 +9,6 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "GLBackendShared.h"

using namespace gpu;
using namespace gpu::gl;
@ -51,26 +50,11 @@ void GLBackend::do_setDepthRangeTransform(Batch& batch, size_t paramOffset) {
    }
}

void GLBackend::initTransform() {
    glGenBuffers(1, &_transform._objectBuffer);
    glGenBuffers(1, &_transform._cameraBuffer);
    glGenBuffers(1, &_transform._drawCallInfoBuffer);
#ifndef GPU_SSBO_DRAW_CALL_INFO
    glGenTextures(1, &_transform._objectBufferTexture);
#endif
    size_t cameraSize = sizeof(TransformStageState::CameraBufferElement);
    while (_transform._cameraUboSize < cameraSize) {
        _transform._cameraUboSize += _uboAlignment;
    }
}

void GLBackend::killTransform() {
    glDeleteBuffers(1, &_transform._objectBuffer);
    glDeleteBuffers(1, &_transform._cameraBuffer);
    glDeleteBuffers(1, &_transform._drawCallInfoBuffer);
#ifndef GPU_SSBO_DRAW_CALL_INFO
    glDeleteTextures(1, &_transform._objectBufferTexture);
#endif
}
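`initTransform()` above rounds the per-camera UBO element size up to a multiple of the driver's uniform buffer offset alignment by repeated addition. A closed-form version of the same rounding; the 256-byte alignment is only an illustrative value for what `GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT` typically reports:

```cpp
#include <cassert>
#include <cstddef>

// Round 'size' up to the next multiple of 'alignment' (alignment > 0).
// Equivalent to the while-loop in initTransform() when starting from 0.
constexpr size_t alignUp(size_t size, size_t alignment) {
    return ((size + alignment - 1) / alignment) * alignment;
}

int main() {
    const size_t uboAlignment = 256; // typical GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT
    assert(alignUp(sizeof(float) * 96, uboAlignment) == 512); // 384 -> 512
    assert(alignUp(256, uboAlignment) == 256);                // already aligned
    return 0;
}
```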
void GLBackend::syncTransformStateCache() {
@ -118,64 +102,6 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
    _invalidView = _invalidProj = _invalidViewport = false;
}

void GLBackend::TransformStageState::transfer(const Batch& batch) const {
    // FIXME not thread safe
    static std::vector<uint8_t> bufferData;
    if (!_cameras.empty()) {
        bufferData.resize(_cameraUboSize * _cameras.size());
        for (size_t i = 0; i < _cameras.size(); ++i) {
            memcpy(bufferData.data() + (_cameraUboSize * i), &_cameras[i], sizeof(CameraBufferElement));
        }
        glBindBuffer(GL_UNIFORM_BUFFER, _cameraBuffer);
        glBufferData(GL_UNIFORM_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_UNIFORM_BUFFER, 0);
    }

    if (!batch._objects.empty()) {
        auto byteSize = batch._objects.size() * sizeof(Batch::TransformObject);
        bufferData.resize(byteSize);
        memcpy(bufferData.data(), batch._objects.data(), byteSize);

#ifdef GPU_SSBO_DRAW_CALL_INFO
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, _objectBuffer);
        glBufferData(GL_SHADER_STORAGE_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
#else
        glBindBuffer(GL_TEXTURE_BUFFER, _objectBuffer);
        glBufferData(GL_TEXTURE_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_TEXTURE_BUFFER, 0);
#endif
    }

    if (!batch._namedData.empty()) {
        bufferData.clear();
        for (auto& data : batch._namedData) {
            auto currentSize = bufferData.size();
            auto bytesToCopy = data.second.drawCallInfos.size() * sizeof(Batch::DrawCallInfo);
            bufferData.resize(currentSize + bytesToCopy);
            memcpy(bufferData.data() + currentSize, data.second.drawCallInfos.data(), bytesToCopy);
            _drawCallInfoOffsets[data.first] = (GLvoid*)currentSize;
        }

        glBindBuffer(GL_ARRAY_BUFFER, _drawCallInfoBuffer);
        glBufferData(GL_ARRAY_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }

#ifdef GPU_SSBO_DRAW_CALL_INFO
    glBindBufferBase(GL_SHADER_STORAGE_BUFFER, TRANSFORM_OBJECT_SLOT, _objectBuffer);
#else
    glActiveTexture(GL_TEXTURE0 + TRANSFORM_OBJECT_SLOT);
    glBindTexture(GL_TEXTURE_BUFFER, _objectBufferTexture);
    glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, _objectBuffer);
#endif

    CHECK_GL_ERROR();

    // Make sure the current Camera offset is unknown before render Draw
    _currentCameraOffset = INVALID_OFFSET;
}

void GLBackend::TransformStageState::update(size_t commandIndex, const StereoState& stereo) const {
    size_t offset = INVALID_OFFSET;
    while ((_camerasItr != _cameraOffsets.end()) && (commandIndex >= (*_camerasItr).first)) {
28  libraries/gpu-gl/src/gpu/gl/GLBuffer.cpp  Normal file

@ -0,0 +1,28 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "GLBuffer.h"

using namespace gpu;
using namespace gpu::gl;

GLBuffer::~GLBuffer() {
    glDeleteBuffers(1, &_id);
    Backend::decrementBufferGPUCount();
    Backend::updateBufferGPUMemoryUsage(_size, 0);
}

GLBuffer::GLBuffer(const Buffer& buffer, GLuint id) :
    GLObject(buffer, id),
    _size((GLuint)buffer._sysmem.getSize()),
    _stamp(buffer._sysmem.getStamp())
{
    Backend::incrementBufferGPUCount();
    Backend::updateBufferGPUMemoryUsage(0, _size);
}
57  libraries/gpu-gl/src/gpu/gl/GLBuffer.h  Normal file

@ -0,0 +1,57 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLBuffer_h
#define hifi_gpu_gl_GLBuffer_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLBuffer : public GLObject<Buffer> {
public:
    template <typename GLBufferType>
    static GLBufferType* sync(const Buffer& buffer) {
        GLBufferType* object = Backend::getGPUObject<GLBufferType>(buffer);

        // Has the storage size changed?
        if (!object || object->_stamp != buffer.getSysmem().getStamp()) {
            object = new GLBufferType(buffer, object);
        }

        if (0 != (buffer._flags & Buffer::DIRTY)) {
            object->transfer();
        }

        return object;
    }

    template <typename GLBufferType>
    static GLuint getId(const Buffer& buffer) {
        GLBuffer* bo = sync<GLBufferType>(buffer);
        if (bo) {
            return bo->_buffer;
        } else {
            return 0;
        }
    }

    const GLuint& _buffer { _id };
    const GLuint _size;
    const Stamp _stamp;

    ~GLBuffer();

    virtual void transfer() = 0;

protected:
    GLBuffer(const Buffer& buffer, GLuint id);
};

} }

#endif
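The `sync`/`getId` templates above are the generic create-or-update path that each GL backend specializes. The following is a hypothetical minimal specialization showing the intended usage; the `GL41Buffer` name, its allocation strategy, and the `readData()` call are illustrative assumptions, not the actual backend implementation:

```cpp
// Hypothetical minimal specialization of gpu::gl::GLBuffer for a 4.1-style
// backend; the real GL41Buffer may differ. Shows the intended usage pattern.
class GL41Buffer : public gpu::gl::GLBuffer {
public:
    GL41Buffer(const gpu::Buffer& buffer, GL41Buffer* original) :
        GLBuffer(buffer, allocate(buffer)) {
        // 'original' (the stale object for this Buffer, if any) could be
        // copied from here before it is destroyed; omitted for brevity.
    }

    void transfer() override {
        // Re-upload the dirty sysmem contents into the GL buffer object
        glBindBuffer(GL_ARRAY_BUFFER, _buffer);
        glBufferSubData(GL_ARRAY_BUFFER, 0, _size, _gpuObject.getSysmem().readData());
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }

private:
    static GLuint allocate(const gpu::Buffer& buffer) {
        GLuint id = 0;
        glGenBuffers(1, &id);
        glBindBuffer(GL_ARRAY_BUFFER, id);
        glBufferData(GL_ARRAY_BUFFER, buffer.getSysmem().getSize(), nullptr, GL_DYNAMIC_DRAW);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        return id;
    }
};

// Typical call site: resolve the GL name for a gpu::Buffer
// GLuint vbo = gpu::gl::GLBuffer::getId<GL41Buffer>(*bufferPointer);
```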
39  libraries/gpu-gl/src/gpu/gl/GLFramebuffer.cpp  Normal file

@ -0,0 +1,39 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "GLFramebuffer.h"

using namespace gpu;
using namespace gpu::gl;

bool GLFramebuffer::checkStatus(GLenum target) const {
    bool result = false;
    switch (_status) {
    case GL_FRAMEBUFFER_COMPLETE:
        // Success!
        result = true;
        break;
    case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
        qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
        break;
    case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
        qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
        break;
    case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
        qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
        break;
    case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
        qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
        break;
    case GL_FRAMEBUFFER_UNSUPPORTED:
        qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
        break;
    }
    return result;
}
76  libraries/gpu-gl/src/gpu/gl/GLFramebuffer.h  Normal file

@ -0,0 +1,76 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLFramebuffer_h
#define hifi_gpu_gl_GLFramebuffer_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLFramebuffer : public GLObject<Framebuffer> {
public:
    template <typename GLFramebufferType>
    static GLFramebufferType* sync(const Framebuffer& framebuffer) {
        GLFramebufferType* object = Backend::getGPUObject<GLFramebufferType>(framebuffer);

        bool needsUpdate { false };
        if (!object ||
            framebuffer.getDepthStamp() != object->_depthStamp ||
            framebuffer.getColorStamps() != object->_colorStamps) {
            needsUpdate = true;
        }

        // If the GPU object is already created and in sync, we're done
        if (!needsUpdate) {
            return object;
        } else if (framebuffer.isEmpty()) {
            // NO framebuffer definition yet so let's avoid thinking
            return nullptr;
        }

        // need to have a gpu object?
        if (!object) {
            // All is green, assign the gpuobject to the Framebuffer
            object = new GLFramebufferType(framebuffer);
            Backend::setGPUObject(framebuffer, object);
            (void)CHECK_GL_ERROR();
        }

        object->update();
        return object;
    }

    template <typename GLFramebufferType>
    static GLuint getId(const Framebuffer& framebuffer) {
        GLFramebufferType* fbo = sync<GLFramebufferType>(framebuffer);
        if (fbo) {
            return fbo->_id;
        } else {
            return 0;
        }
    }

    const GLuint& _fbo { _id };
    std::vector<GLenum> _colorBuffers;
    Stamp _depthStamp { 0 };
    std::vector<Stamp> _colorStamps;

protected:
    GLenum _status { GL_FRAMEBUFFER_COMPLETE };
    virtual void update() = 0;
    bool checkStatus(GLenum target) const;

    GLFramebuffer(const Framebuffer& framebuffer, GLuint id) : GLObject(framebuffer, id) {}
    ~GLFramebuffer() { if (_id) { glDeleteFramebuffers(1, &_id); } };

};

} }

#endif
48  libraries/gpu-gl/src/gpu/gl/GLPipeline.cpp  Normal file

@ -0,0 +1,48 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "GLPipeline.h"

#include "GLShader.h"
#include "GLState.h"

using namespace gpu;
using namespace gpu::gl;

GLPipeline* GLPipeline::sync(const Pipeline& pipeline) {
    GLPipeline* object = Backend::getGPUObject<GLPipeline>(pipeline);

    // If GPU object already created then good
    if (object) {
        return object;
    }

    // No object allocated yet, let's see if it's worth it...
    ShaderPointer shader = pipeline.getProgram();
    GLShader* programObject = GLShader::sync(*shader);
    if (programObject == nullptr) {
        return nullptr;
    }

    StatePointer state = pipeline.getState();
    GLState* stateObject = GLState::sync(*state);
    if (stateObject == nullptr) {
        return nullptr;
    }

    // Program and state are valid, we can create the pipeline object
    if (!object) {
        object = new GLPipeline();
        Backend::setGPUObject(pipeline, object);
    }

    object->_program = programObject;
    object->_state = stateObject;

    return object;
}
26  libraries/gpu-gl/src/gpu/gl/GLPipeline.h  Normal file

@ -0,0 +1,26 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLPipeline_h
#define hifi_gpu_gl_GLPipeline_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLPipeline : public GPUObject {
public:
    static GLPipeline* sync(const Pipeline& pipeline);

    GLShader* _program { nullptr };
    GLState* _state { nullptr };
};

} }

#endif
12  libraries/gpu-gl/src/gpu/gl/GLQuery.cpp  Normal file

@ -0,0 +1,12 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "GLQuery.h"

using namespace gpu;
using namespace gpu::gl;
57  libraries/gpu-gl/src/gpu/gl/GLQuery.h  Normal file

@ -0,0 +1,57 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLQuery_h
#define hifi_gpu_gl_GLQuery_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLQuery : public GLObject<Query> {
    using Parent = gpu::gl::GLObject<Query>;
public:
    template <typename GLQueryType>
    static GLQueryType* sync(const Query& query) {
        GLQueryType* object = Backend::getGPUObject<GLQueryType>(query);

        // need to have a gpu object?
        if (!object) {
            // All is green, assign the gpuobject to the Query
            object = new GLQueryType(query);
            (void)CHECK_GL_ERROR();
            Backend::setGPUObject(query, object);
        }

        return object;
    }

    template <typename GLQueryType>
    static GLuint getId(const QueryPointer& query) {
        if (!query) {
            return 0;
        }

        GLQuery* object = sync<GLQueryType>(*query);
        if (!object) {
            return 0;
        }

        return object->_qo;
    }

    const GLuint& _qo { _id };
    GLuint64 _result { (GLuint64)-1 };

protected:
    GLQuery(const Query& query, GLuint id) : Parent(query, id) {}
    ~GLQuery() { if (_id) { glDeleteQueries(1, &_id); } }
};

} }

#endif
182  libraries/gpu-gl/src/gpu/gl/GLShader.cpp  Normal file

@ -0,0 +1,182 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLShader.h"
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl;

GLShader::GLShader() {
}

GLShader::~GLShader() {
    for (auto& so : _shaderObjects) {
        if (so.glshader != 0) {
            glDeleteShader(so.glshader);
        }
        if (so.glprogram != 0) {
            glDeleteProgram(so.glprogram);
        }
    }
}

GLShader* compileBackendShader(const Shader& shader) {
    // Any GLSL program? Normally yes...
    const std::string& shaderSource = shader.getSource().getCode();

    // GLSL version
    const std::string glslVersion = {
        "#version 410 core"
    };

    // Shader domain
    const int NUM_SHADER_DOMAINS = 2;
    const GLenum SHADER_DOMAINS[NUM_SHADER_DOMAINS] = {
        GL_VERTEX_SHADER,
        GL_FRAGMENT_SHADER
    };
    GLenum shaderDomain = SHADER_DOMAINS[shader.getType()];

    // Domain-specific defines
    const std::string domainDefines[NUM_SHADER_DOMAINS] = {
        "#define GPU_VERTEX_SHADER",
        "#define GPU_PIXEL_SHADER"
    };

    // Version-specific defines for the shader
    const std::string versionDefines[GLShader::NumVersions] = {
        ""
    };

    GLShader::ShaderObjects shaderObjects;

    for (int version = 0; version < GLShader::NumVersions; version++) {
        auto& shaderObject = shaderObjects[version];

        std::string shaderDefines = glslVersion + "\n" + domainDefines[shader.getType()] + "\n" + versionDefines[version];

        bool result = compileShader(shaderDomain, shaderSource, shaderDefines, shaderObject.glshader, shaderObject.glprogram);
        if (!result) {
            return nullptr;
        }
    }

    // So far so good, the shader is created successfully
    GLShader* object = new GLShader();
    object->_shaderObjects = shaderObjects;

    return object;
}

GLShader* compileBackendProgram(const Shader& program) {
    if (!program.isProgram()) {
        return nullptr;
    }

    GLShader::ShaderObjects programObjects;

    for (int version = 0; version < GLShader::NumVersions; version++) {
        auto& programObject = programObjects[version];

        // Let's go through every shader and make sure they are ready to go
        std::vector< GLuint > shaderGLObjects;
        for (auto subShader : program.getShaders()) {
            auto object = GLShader::sync(*subShader);
            if (object) {
                shaderGLObjects.push_back(object->_shaderObjects[version].glshader);
            } else {
                qCDebug(gpugllogging) << "GLShader::compileBackendProgram - One of the shaders of the program is not compiled?";
                return nullptr;
            }
        }

        GLuint glprogram = compileProgram(shaderGLObjects);
        if (glprogram == 0) {
            return nullptr;
        }

        programObject.glprogram = glprogram;

        makeProgramBindings(programObject);
    }

    // So far so good, the program versions have all been created successfully
    GLShader* object = new GLShader();
    object->_shaderObjects = programObjects;

    return object;
}

GLShader* GLShader::sync(const Shader& shader) {
    GLShader* object = Backend::getGPUObject<GLShader>(shader);

    // If GPU object already created then good
    if (object) {
        return object;
    }
    // need to have a gpu object?
    if (shader.isProgram()) {
        GLShader* tempObject = compileBackendProgram(shader);
        if (tempObject) {
            object = tempObject;
            Backend::setGPUObject(shader, object);
        }
    } else if (shader.isDomain()) {
        GLShader* tempObject = compileBackendShader(shader);
        if (tempObject) {
            object = tempObject;
            Backend::setGPUObject(shader, object);
        }
    }

    return object;
}

bool GLShader::makeProgram(Shader& shader, const Shader::BindingSet& slotBindings) {

    // First make sure the Shader has been compiled
    GLShader* object = sync(shader);
    if (!object) {
        return false;
    }

    // Apply bindings to all program versions and generate the list of slots from the default version
    for (int version = 0; version < GLShader::NumVersions; version++) {
        auto& shaderObject = object->_shaderObjects[version];
        if (shaderObject.glprogram) {
            Shader::SlotSet buffers;
            makeUniformBlockSlots(shaderObject.glprogram, slotBindings, buffers);

            Shader::SlotSet uniforms;
            Shader::SlotSet textures;
            Shader::SlotSet samplers;
            makeUniformSlots(shaderObject.glprogram, slotBindings, uniforms, textures, samplers);

            Shader::SlotSet inputs;
            makeInputSlots(shaderObject.glprogram, slotBindings, inputs);

            Shader::SlotSet outputs;
            makeOutputSlots(shaderObject.glprogram, slotBindings, outputs);

            // Define the public slots only from the default version
            if (version == 0) {
                shader.defineSlots(uniforms, buffers, textures, samplers, inputs, outputs);
            } else {
                GLShader::UniformMapping mapping;
                for (auto srcUniform : shader.getUniforms()) {
                    mapping[srcUniform._location] = uniforms.findLocation(srcUniform._name);
                }
                object->_uniformMappings.push_back(mapping);
            }
        }
    }

    return true;
}
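`compileBackendShader` builds a per-domain, per-version define prelude and prepends it to the shared source before compiling. A self-contained sketch of that prepend-and-compile technique using `glShaderSource` with two source strings; the helper name is invented:

```cpp
#include <GL/glew.h>
#include <string>
#include <vector>

// Compile 'source' with a generated prelude (version + defines), the same
// prepend-and-compile technique used by compileBackendShader() above.
GLuint compileWithPrelude(GLenum domain, const std::string& source, bool vertex) {
    std::string prelude = "#version 410 core\n";
    prelude += vertex ? "#define GPU_VERTEX_SHADER\n" : "#define GPU_PIXEL_SHADER\n";

    const char* strings[2] = { prelude.c_str(), source.c_str() };
    GLuint shader = glCreateShader(domain);
    glShaderSource(shader, 2, strings, nullptr); // prelude first, then the body
    glCompileShader(shader);

    GLint compiled = GL_FALSE;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
    if (!compiled) {
        GLint logLength = 0;
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logLength);
        std::vector<char> log(logLength > 0 ? logLength : 1);
        glGetShaderInfoLog(shader, (GLsizei)log.size(), nullptr, log.data());
        // report log.data() through the app's logging here
        glDeleteShader(shader);
        return 0;
    }
    return shader;
}
```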
52  libraries/gpu-gl/src/gpu/gl/GLShader.h  Normal file

@ -0,0 +1,52 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLShader_h
#define hifi_gpu_gl_GLShader_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLShader : public GPUObject {
public:
    static GLShader* sync(const Shader& shader);
    static bool makeProgram(Shader& shader, const Shader::BindingSet& slotBindings);

    enum Version {
        Mono = 0,
        NumVersions
    };

    using ShaderObject = gpu::gl::ShaderObject;
    using ShaderObjects = std::array< ShaderObject, NumVersions >;

    using UniformMapping = std::map<GLint, GLint>;
    using UniformMappingVersions = std::vector<UniformMapping>;

    GLShader();
    ~GLShader();

    ShaderObjects _shaderObjects;
    UniformMappingVersions _uniformMappings;

    GLuint getProgram(Version version = Mono) const {
        return _shaderObjects[version].glprogram;
    }

    GLint getUniformLocation(GLint srcLoc, Version version = Mono) {
        // This will be used in a future PR as we grow the number of versions
        // return _uniformMappings[version][srcLoc];
        return srcLoc;
    }

};

} }

#endif
File diff suppressed because it is too large
158  libraries/gpu-gl/src/gpu/gl/GLShared.h  Normal file

@ -0,0 +1,158 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_GLShared_h
#define hifi_gpu_GLShared_h

#include <gl/Config.h>
#include <gpu/Forward.h>
#include <gpu/Format.h>
#include <gpu/Context.h>
#include <QLoggingCategory>

Q_DECLARE_LOGGING_CATEGORY(gpugllogging)

namespace gpu { namespace gl {

gpu::Size getDedicatedMemory();
ComparisonFunction comparisonFuncFromGL(GLenum func);
State::StencilOp stencilOpFromGL(GLenum stencilOp);
State::BlendOp blendOpFromGL(GLenum blendOp);
State::BlendArg blendArgFromGL(GLenum blendArg);
void getCurrentGLState(State::Data& state);

struct ShaderObject {
    GLuint glshader { 0 };
    GLuint glprogram { 0 };
    GLint transformCameraSlot { -1 };
    GLint transformObjectSlot { -1 };
};

int makeUniformSlots(GLuint glprogram, const Shader::BindingSet& slotBindings,
    Shader::SlotSet& uniforms, Shader::SlotSet& textures, Shader::SlotSet& samplers);
int makeUniformBlockSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& buffers);
int makeInputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& inputs);
int makeOutputSlots(GLuint glprogram, const Shader::BindingSet& slotBindings, Shader::SlotSet& outputs);
bool compileShader(GLenum shaderDomain, const std::string& shaderSource, const std::string& defines, GLuint &shaderObject, GLuint &programObject);
GLuint compileProgram(const std::vector<GLuint>& glshaders);
void makeProgramBindings(ShaderObject& shaderObject);

enum GLSyncState {
    // The object is currently undergoing no processing, although its content
    // may be out of date, or its storage may be invalid relative to the
    // owning GPU object
    Idle,
    // The object has been queued for transfer to the GPU
    Pending,
    // The object has been transferred to the GPU, but is awaiting
    // any post-transfer operations that may need to occur on the
    // primary rendering thread
    Transferred,
};

static const GLenum BLEND_OPS_TO_GL[State::NUM_BLEND_OPS] = {
    GL_FUNC_ADD,
    GL_FUNC_SUBTRACT,
    GL_FUNC_REVERSE_SUBTRACT,
    GL_MIN,
    GL_MAX
};

static const GLenum BLEND_ARGS_TO_GL[State::NUM_BLEND_ARGS] = {
    GL_ZERO,
    GL_ONE,
    GL_SRC_COLOR,
    GL_ONE_MINUS_SRC_COLOR,
    GL_SRC_ALPHA,
    GL_ONE_MINUS_SRC_ALPHA,
    GL_DST_ALPHA,
    GL_ONE_MINUS_DST_ALPHA,
    GL_DST_COLOR,
    GL_ONE_MINUS_DST_COLOR,
    GL_SRC_ALPHA_SATURATE,
    GL_CONSTANT_COLOR,
    GL_ONE_MINUS_CONSTANT_COLOR,
    GL_CONSTANT_ALPHA,
    GL_ONE_MINUS_CONSTANT_ALPHA,
};

static const GLenum COMPARISON_TO_GL[gpu::NUM_COMPARISON_FUNCS] = {
    GL_NEVER,
    GL_LESS,
    GL_EQUAL,
    GL_LEQUAL,
    GL_GREATER,
    GL_NOTEQUAL,
    GL_GEQUAL,
    GL_ALWAYS
};

static const GLenum PRIMITIVE_TO_GL[gpu::NUM_PRIMITIVES] = {
    GL_POINTS,
    GL_LINES,
    GL_LINE_STRIP,
    GL_TRIANGLES,
    GL_TRIANGLE_STRIP,
    GL_TRIANGLE_FAN,
};

static const GLenum ELEMENT_TYPE_TO_GL[gpu::NUM_TYPES] = {
    GL_FLOAT,
    GL_INT,
    GL_UNSIGNED_INT,
    GL_HALF_FLOAT,
    GL_SHORT,
    GL_UNSIGNED_SHORT,
    GL_BYTE,
    GL_UNSIGNED_BYTE,
    // Normalized values
    GL_INT,
    GL_UNSIGNED_INT,
    GL_SHORT,
    GL_UNSIGNED_SHORT,
    GL_BYTE,
    GL_UNSIGNED_BYTE
};

bool checkGLError(const char* name = nullptr);
bool checkGLErrorDebug(const char* name = nullptr);

template <typename GPUType>
struct GLObject : public GPUObject {
public:
    GLObject(const GPUType& gpuObject, GLuint id) : _gpuObject(gpuObject), _id(id) {}

    virtual ~GLObject() { }

    // Used by derived classes and helpers to ensure the actual GL object exceeds the lifetime of `this`
    GLuint takeOwnership() {
        GLuint result = _id;
        const_cast<GLuint&>(_id) = 0;
        return result;
    }

    const GPUType& _gpuObject;
    const GLuint _id;
};

class GLBuffer;
class GLFramebuffer;
class GLPipeline;
class GLQuery;
class GLState;
class GLShader;
class GLTexture;
class GLTextureTransferHelper;

} } // namespace gpu::gl

#define CHECK_GL_ERROR() gpu::gl::checkGLErrorDebug(__FUNCTION__)

#endif
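`GLObject::takeOwnership` zeroes the stored GL name so a derived destructor's delete becomes a no-op, letting another owner keep the underlying GL object alive. A minimal sketch of the handoff with a simplified wrapper type (not the engine's class):

```cpp
#include <GL/glew.h>

// Simplified owner type mirroring gpu::gl::GLObject's ownership transfer.
class OwnedTexture {
public:
    explicit OwnedTexture(GLuint id) : _id(id) {}
    ~OwnedTexture() {
        if (_id) {
            glDeleteTextures(1, &_id); // skipped once ownership was taken
        }
    }
    // After this call the destructor no longer deletes the GL texture.
    GLuint takeOwnership() {
        GLuint result = _id;
        _id = 0;
        return result;
    }
private:
    GLuint _id { 0 };
};

void example() {
    GLuint name = 0;
    glGenTextures(1, &name);
    OwnedTexture wrapper(name);
    GLuint raw = wrapper.takeOwnership(); // 'wrapper' may now die safely
    glDeleteTextures(1, &raw);            // the new owner cleans up instead
}
```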
233  libraries/gpu-gl/src/gpu/gl/GLState.cpp  Normal file

@ -0,0 +1,233 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLState.h"
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl;

typedef GLState::Command Command;
typedef GLState::CommandPointer CommandPointer;
typedef GLState::Command1<uint32> Command1U;
typedef GLState::Command1<int32> Command1I;
typedef GLState::Command1<bool> Command1B;
typedef GLState::Command1<Vec2> CommandDepthBias;
typedef GLState::Command1<State::DepthTest> CommandDepthTest;
typedef GLState::Command3<State::StencilActivation, State::StencilTest, State::StencilTest> CommandStencil;
typedef GLState::Command1<State::BlendFunction> CommandBlend;

const GLState::Commands makeResetStateCommands();

// NOTE: This must stay in sync with the ordering of the State::Field enum
const GLState::Commands makeResetStateCommands() {
    // Since State::DEFAULT is a static defined in another .cpp the initialisation order is random
    // and we have a 50/50 chance that State::DEFAULT is not yet initialized.
    // Since State::DEFAULT = State::Data() it is much easier to not use the actual State::DEFAULT
    // but another State::Data object with a default initialization.
    const State::Data DEFAULT = State::Data();

    auto depthBiasCommand = std::make_shared<CommandDepthBias>(&GLBackend::do_setStateDepthBias,
        Vec2(DEFAULT.depthBias, DEFAULT.depthBiasSlopeScale));
    auto stencilCommand = std::make_shared<CommandStencil>(&GLBackend::do_setStateStencil, DEFAULT.stencilActivation,
        DEFAULT.stencilTestFront, DEFAULT.stencilTestBack);

    // The state commands to reset to default,
    // WARNING depending on the order of the State::Field enum
    return {
        std::make_shared<Command1I>(&GLBackend::do_setStateFillMode, DEFAULT.fillMode),
        std::make_shared<Command1I>(&GLBackend::do_setStateCullMode, DEFAULT.cullMode),
        std::make_shared<Command1B>(&GLBackend::do_setStateFrontFaceClockwise, DEFAULT.frontFaceClockwise),
        std::make_shared<Command1B>(&GLBackend::do_setStateDepthClampEnable, DEFAULT.depthClampEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateScissorEnable, DEFAULT.scissorEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateMultisampleEnable, DEFAULT.multisampleEnable),
        std::make_shared<Command1B>(&GLBackend::do_setStateAntialiasedLineEnable, DEFAULT.antialisedLineEnable),

        // Depth bias has 2 fields in State but really one call in GLBackend
        CommandPointer(depthBiasCommand),
        CommandPointer(depthBiasCommand),

        std::make_shared<CommandDepthTest>(&GLBackend::do_setStateDepthTest, DEFAULT.depthTest),

        // Stencil has 3 fields in State but really one call in GLBackend
        CommandPointer(stencilCommand),
        CommandPointer(stencilCommand),
        CommandPointer(stencilCommand),

        std::make_shared<Command1U>(&GLBackend::do_setStateSampleMask, DEFAULT.sampleMask),

        std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, DEFAULT.alphaToCoverageEnable),

        std::make_shared<CommandBlend>(&GLBackend::do_setStateBlend, DEFAULT.blendFunction),

        std::make_shared<Command1U>(&GLBackend::do_setStateColorWriteMask, DEFAULT.colorWriteMask)
    };
}

const GLState::Commands GLState::_resetStateCommands = makeResetStateCommands();


void generateFillMode(GLState::Commands& commands, State::FillMode fillMode) {
    commands.push_back(std::make_shared<Command1I>(&GLBackend::do_setStateFillMode, int32(fillMode)));
}

void generateCullMode(GLState::Commands& commands, State::CullMode cullMode) {
    commands.push_back(std::make_shared<Command1I>(&GLBackend::do_setStateCullMode, int32(cullMode)));
}

void generateFrontFaceClockwise(GLState::Commands& commands, bool isClockwise) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateFrontFaceClockwise, isClockwise));
}

void generateDepthClampEnable(GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateDepthClampEnable, enable));
}

void generateScissorEnable(GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateScissorEnable, enable));
}

void generateMultisampleEnable(GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateMultisampleEnable, enable));
}

void generateAntialiasedLineEnable(GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateAntialiasedLineEnable, enable));
}

void generateDepthBias(GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandDepthBias>(&GLBackend::do_setStateDepthBias, Vec2(state.getDepthBias(), state.getDepthBiasSlopeScale())));
}

void generateDepthTest(GLState::Commands& commands, const State::DepthTest& test) {
    commands.push_back(std::make_shared<CommandDepthTest>(&GLBackend::do_setStateDepthTest, int32(test.getRaw())));
}

void generateStencil(GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandStencil>(&GLBackend::do_setStateStencil, state.getStencilActivation(), state.getStencilTestFront(), state.getStencilTestBack()));
}

void generateAlphaToCoverageEnable(GLState::Commands& commands, bool enable) {
    commands.push_back(std::make_shared<Command1B>(&GLBackend::do_setStateAlphaToCoverageEnable, enable));
}

void generateSampleMask(GLState::Commands& commands, uint32 mask) {
    commands.push_back(std::make_shared<Command1U>(&GLBackend::do_setStateSampleMask, mask));
}

void generateBlend(GLState::Commands& commands, const State& state) {
    commands.push_back(std::make_shared<CommandBlend>(&GLBackend::do_setStateBlend, state.getBlendFunction()));
}

void generateColorWriteMask(GLState::Commands& commands, uint32 mask) {
    commands.push_back(std::make_shared<Command1U>(&GLBackend::do_setStateColorWriteMask, mask));
}

GLState* GLState::sync(const State& state) {
    GLState* object = Backend::getGPUObject<GLState>(state);

    // If GPU object already created then good
    if (object) {
        return object;
    }

    // Else allocate and create the GLState
    if (!object) {
        object = new GLState();
        Backend::setGPUObject(state, object);
    }

    // here, we need to regenerate something so let's do it all
    object->_commands.clear();
    object->_stamp = state.getStamp();
    object->_signature = state.getSignature();

    bool depthBias = false;
    bool stencilState = false;

    // go through the list of state fields in the State and record the corresponding gl command
    for (int i = 0; i < State::NUM_FIELDS; i++) {
        if (state.getSignature()[i]) {
            switch (i) {
                case State::FILL_MODE: {
                    generateFillMode(object->_commands, state.getFillMode());
                    break;
                }
                case State::CULL_MODE: {
                    generateCullMode(object->_commands, state.getCullMode());
                    break;
                }
                case State::DEPTH_BIAS:
                case State::DEPTH_BIAS_SLOPE_SCALE: {
                    depthBias = true;
                    break;
                }
                case State::FRONT_FACE_CLOCKWISE: {
                    generateFrontFaceClockwise(object->_commands, state.isFrontFaceClockwise());
                    break;
                }
                case State::DEPTH_CLAMP_ENABLE: {
                    generateDepthClampEnable(object->_commands, state.isDepthClampEnable());
                    break;
                }
                case State::SCISSOR_ENABLE: {
                    generateScissorEnable(object->_commands, state.isScissorEnable());
                    break;
                }
                case State::MULTISAMPLE_ENABLE: {
                    generateMultisampleEnable(object->_commands, state.isMultisampleEnable());
                    break;
                }
                case State::ANTIALISED_LINE_ENABLE: {
                    generateAntialiasedLineEnable(object->_commands, state.isAntialiasedLineEnable());
                    break;
                }
                case State::DEPTH_TEST: {
                    generateDepthTest(object->_commands, state.getDepthTest());
                    break;
                }

                case State::STENCIL_ACTIVATION:
                case State::STENCIL_TEST_FRONT:
                case State::STENCIL_TEST_BACK: {
                    stencilState = true;
                    break;
                }

                case State::SAMPLE_MASK: {
                    generateSampleMask(object->_commands, state.getSampleMask());
                    break;
                }
                case State::ALPHA_TO_COVERAGE_ENABLE: {
                    generateAlphaToCoverageEnable(object->_commands, state.isAlphaToCoverageEnabled());
                    break;
                }

                case State::BLEND_FUNCTION: {
                    generateBlend(object->_commands, state);
                    break;
                }

                case State::COLOR_WRITE_MASK: {
                    generateColorWriteMask(object->_commands, state.getColorWriteMask());
                    break;
                }
            }
        }
    }

    if (depthBias) {
        generateDepthBias(object->_commands, state);
    }

    if (stencilState) {
        generateStencil(object->_commands, state);
    }

    return object;
}
73  libraries/gpu-gl/src/gpu/gl/GLState.h  Normal file

@ -0,0 +1,73 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLState_h
#define hifi_gpu_gl_GLState_h

#include "GLShared.h"

#include <gpu/State.h>

namespace gpu { namespace gl {

class GLBackend;
class GLState : public GPUObject {
public:
    static GLState* sync(const State& state);

    class Command {
    public:
        virtual void run(GLBackend* backend) = 0;
        Command() {}
        virtual ~Command() {};
    };

    template <class T> class Command1 : public Command {
    public:
        typedef void (GLBackend::*GLFunction)(T);
        void run(GLBackend* backend) { (backend->*(_func))(_param); }
        Command1(GLFunction func, T param) : _func(func), _param(param) {};
        GLFunction _func;
        T _param;
    };
    template <class T, class U> class Command2 : public Command {
    public:
        typedef void (GLBackend::*GLFunction)(T, U);
        void run(GLBackend* backend) { (backend->*(_func))(_param0, _param1); }
        Command2(GLFunction func, T param0, U param1) : _func(func), _param0(param0), _param1(param1) {};
        GLFunction _func;
        T _param0;
        U _param1;
    };

    template <class T, class U, class V> class Command3 : public Command {
    public:
        typedef void (GLBackend::*GLFunction)(T, U, V);
        void run(GLBackend* backend) { (backend->*(_func))(_param0, _param1, _param2); }
        Command3(GLFunction func, T param0, U param1, V param2) : _func(func), _param0(param0), _param1(param1), _param2(param2) {};
        GLFunction _func;
        T _param0;
        U _param1;
        V _param2;
    };

    typedef std::shared_ptr< Command > CommandPointer;
    typedef std::vector< CommandPointer > Commands;

    Commands _commands;
    Stamp _stamp;
    State::Signature _signature;

    // The state commands to reset to default
    static const Commands _resetStateCommands;

    friend class GLBackend;
};

} }

#endif
@ -1,59 +1,15 @@
//
//  Created by Bradley Austin Davis on 2016/05/14
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"

#include <QLoggingCategory>
#include "GLTexelFormat.h"

Q_DECLARE_LOGGING_CATEGORY(gpugllogging)
Q_LOGGING_CATEGORY(gpugllogging, "hifi.gpu.gl")

namespace gpu { namespace gl {

bool checkGLError(const char* name) {
    GLenum error = glGetError();
    if (!error) {
        return false;
    } else {
        switch (error) {
            case GL_INVALID_ENUM:
                qCDebug(gpugllogging) << "GLBackend::" << name << ": An unacceptable value is specified for an enumerated argument. The offending command is ignored and has no other side effect than to set the error flag.";
                break;
            case GL_INVALID_VALUE:
                qCDebug(gpugllogging) << "GLBackend" << name << ": A numeric argument is out of range. The offending command is ignored and has no other side effect than to set the error flag.";
                break;
            case GL_INVALID_OPERATION:
                qCDebug(gpugllogging) << "GLBackend" << name << ": The specified operation is not allowed in the current state. The offending command is ignored and has no other side effect than to set the error flag.";
                break;
            case GL_INVALID_FRAMEBUFFER_OPERATION:
                qCDebug(gpugllogging) << "GLBackend" << name << ": The framebuffer object is not complete. The offending command is ignored and has no other side effect than to set the error flag.";
                break;
            case GL_OUT_OF_MEMORY:
                qCDebug(gpugllogging) << "GLBackend" << name << ": There is not enough memory left to execute the command. The state of the GL is undefined, except for the state of the error flags, after this error is recorded.";
                break;
            case GL_STACK_UNDERFLOW:
                qCDebug(gpugllogging) << "GLBackend" << name << ": An attempt has been made to perform an operation that would cause an internal stack to underflow.";
                break;
            case GL_STACK_OVERFLOW:
                qCDebug(gpugllogging) << "GLBackend" << name << ": An attempt has been made to perform an operation that would cause an internal stack to overflow.";
                break;
        }
        return true;
    }
}

bool checkGLErrorDebug(const char* name) {
#ifdef DEBUG
    return checkGLError(name);
#else
    Q_UNUSED(name);
    return false;
#endif
}
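As a usage note, checkGLError is the workhorse behind the CHECK_GL_ERROR() calls seen throughout the backends. A minimal call-site sketch, assuming only core GL entry points; the uploadSketch helper is hypothetical, and CHECK_GL_ERROR() presumably wraps gpu::gl::checkGLErrorDebug(__FUNCTION__) so that release builds skip the check entirely:

    // Hypothetical call-site sketch showing the CHECK_GL_ERROR() idiom.
    void uploadSketch(GLuint vbo, GLsizeiptr size, const void* data) {
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        (void)CHECK_GL_ERROR();   // logs and returns true iff glGetError() != GL_NO_ERROR
        glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW);
        (void)CHECK_GL_ERROR();
    }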
using namespace gpu;
using namespace gpu::gl;


GLTexelFormat GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {

@ -68,7 +24,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
    switch (dstFormat.getDimension()) {
        case gpu::SCALAR: {
            texel.format = GL_RED;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -96,7 +52,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E

        case gpu::VEC2: {
            texel.format = GL_RG;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -113,7 +69,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
        case gpu::VEC3: {
            texel.format = GL_RGB;

            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -135,7 +91,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E

        case gpu::VEC4: {
            texel.format = GL_RGBA;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (srcFormat.getSemantic()) {
                case gpu::BGRA:

@ -205,7 +161,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
    switch (dstFormat.getDimension()) {
        case gpu::SCALAR: {
            texel.format = GL_RED;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::COMPRESSED_R: {

@ -340,7 +296,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E

        case gpu::VEC2: {
            texel.format = GL_RG;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -357,7 +313,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
        case gpu::VEC3: {
            texel.format = GL_RGB;

            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -382,7 +338,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E

        case gpu::VEC4: {
            texel.format = GL_RGBA;
            texel.type = _elementTypeToGLType[dstFormat.getType()];
            texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];

            switch (dstFormat.getSemantic()) {
                case gpu::RGB:

@ -468,5 +424,3 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
    return texel;
}
}

} }
32	libraries/gpu-gl/src/gpu/gl/GLTexelFormat.h	Normal file
@ -0,0 +1,32 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLTexelFormat_h
#define hifi_gpu_gl_GLTexelFormat_h

#include "GLShared.h"

namespace gpu { namespace gl {

class GLTexelFormat {
public:
    GLenum internalFormat;
    GLenum format;
    GLenum type;

    static GLTexelFormat evalGLTexelFormat(const Element& dstFormat) {
        return evalGLTexelFormat(dstFormat, dstFormat);
    }
    static GLTexelFormat evalGLTexelFormatInternal(const Element& dstFormat);

    static GLTexelFormat evalGLTexelFormat(const Element& dstFormat, const Element& srcFormat);
};

} }


#endif
292	libraries/gpu-gl/src/gpu/gl/GLTexture.cpp	Normal file
@ -0,0 +1,292 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "GLTexture.h"

#include <NumericalConstants.h>

#include "GLTextureTransfer.h"

using namespace gpu;
using namespace gpu::gl;

std::shared_ptr<GLTextureTransferHelper> GLTexture::_textureTransferHelper;
static std::map<uint16, size_t> _textureCountByMips;
static uint16 _currentMaxMipCount { 0 };

// FIXME placeholder for texture memory over-use
#define DEFAULT_MAX_MEMORY_MB 256

const GLenum GLTexture::CUBE_FACE_LAYOUT[6] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

const GLenum GLTexture::WRAP_MODES[Sampler::NUM_WRAP_MODES] = {
    GL_REPEAT,                  // WRAP_REPEAT,
    GL_MIRRORED_REPEAT,         // WRAP_MIRROR,
    GL_CLAMP_TO_EDGE,           // WRAP_CLAMP,
    GL_CLAMP_TO_BORDER,         // WRAP_BORDER,
    GL_MIRROR_CLAMP_TO_EDGE_EXT // WRAP_MIRROR_ONCE,
};

const GLFilterMode GLTexture::FILTER_MODES[Sampler::NUM_FILTERS] = {
    { GL_NEAREST, GL_NEAREST },  //FILTER_MIN_MAG_POINT,
    { GL_NEAREST, GL_LINEAR },   //FILTER_MIN_POINT_MAG_LINEAR,
    { GL_LINEAR, GL_NEAREST },   //FILTER_MIN_LINEAR_MAG_POINT,
    { GL_LINEAR, GL_LINEAR },    //FILTER_MIN_MAG_LINEAR,

    { GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST },  //FILTER_MIN_MAG_MIP_POINT,
    { GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST },   //FILTER_MIN_MAG_POINT_MIP_LINEAR,
    { GL_NEAREST_MIPMAP_NEAREST, GL_LINEAR },   //FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT,
    { GL_NEAREST_MIPMAP_LINEAR, GL_LINEAR },    //FILTER_MIN_POINT_MAG_MIP_LINEAR,
    { GL_LINEAR_MIPMAP_NEAREST, GL_NEAREST },   //FILTER_MIN_LINEAR_MAG_MIP_POINT,
    { GL_LINEAR_MIPMAP_LINEAR, GL_NEAREST },    //FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR,
    { GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR },    //FILTER_MIN_MAG_LINEAR_MIP_POINT,
    { GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR },     //FILTER_MIN_MAG_MIP_LINEAR,
    { GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR }      //FILTER_ANISOTROPIC,
};

GLenum GLTexture::getGLTextureType(const Texture& texture) {
    switch (texture.getType()) {
        case Texture::TEX_2D:
            return GL_TEXTURE_2D;
            break;

        case Texture::TEX_CUBE:
            return GL_TEXTURE_CUBE_MAP;
            break;

        default:
            qFatal("Unsupported texture type");
    }
    Q_UNREACHABLE();
    return GL_TEXTURE_2D;
}


const std::vector<GLenum>& GLTexture::getFaceTargets(GLenum target) {
    static std::vector<GLenum> cubeFaceTargets {
        GL_TEXTURE_CUBE_MAP_POSITIVE_X, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
        GL_TEXTURE_CUBE_MAP_POSITIVE_Y, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
        GL_TEXTURE_CUBE_MAP_POSITIVE_Z, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
    };
    static std::vector<GLenum> faceTargets {
        GL_TEXTURE_2D
    };
    switch (target) {
        case GL_TEXTURE_2D:
            return faceTargets;
        case GL_TEXTURE_CUBE_MAP:
            return cubeFaceTargets;
        default:
            Q_UNREACHABLE();
            break;
    }
    Q_UNREACHABLE();
    return faceTargets;
}

float GLTexture::getMemoryPressure() {
    // Check for an explicit memory limit
    auto availableTextureMemory = Texture::getAllowedGPUMemoryUsage();

    // If no memory limit has been set, use a percentage of the total dedicated memory
    if (!availableTextureMemory) {
        auto totalGpuMemory = gpu::gl::getDedicatedMemory();

        // If no limit has been explicitly set, and the dedicated memory can't be determined,
        // just use a fallback fixed value of 256 MB
        if (!totalGpuMemory) {
            totalGpuMemory = MB_TO_BYTES(DEFAULT_MAX_MEMORY_MB);
        }

        // Allow 75% of all available GPU memory to be consumed by textures
        // FIXME overly conservative?
        availableTextureMemory = (totalGpuMemory >> 2) * 3;
    }

    // Return the consumed texture memory divided by the available texture memory.
    auto consumedGpuMemory = Context::getTextureGPUMemoryUsage();
    return (float)consumedGpuMemory / (float)availableTextureMemory;
}
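A quick worked example of the fallback math above, with illustrative numbers: `(totalGpuMemory >> 2) * 3` is the integer-only way of taking 75% of the budget, and a pressure above 1.0 signals over-commitment.

    // Worked example of the 75% budget computation (values are illustrative).
    size_t totalGpuMemory = 2048ull * 1024 * 1024;              // e.g. a 2 GB card
    size_t availableTextureMemory = (totalGpuMemory >> 2) * 3;  // 1536 MB budget, no floats

    size_t consumed = 1800ull * 1024 * 1024;                    // hypothetical usage
    float pressure = (float)consumed / (float)availableTextureMemory;  // ~1.17 -> over budget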
GLTexture::DownsampleSource::DownsampleSource(GLTexture* oldTexture) :
    _texture(oldTexture ? oldTexture->takeOwnership() : 0),
    _minMip(oldTexture ? oldTexture->_minMip : 0),
    _maxMip(oldTexture ? oldTexture->_maxMip : 0)
{
}

GLTexture::DownsampleSource::~DownsampleSource() {
    if (_texture) {
        glDeleteTextures(1, &_texture);
        Backend::decrementTextureGPUCount();
    }
}

GLTexture::GLTexture(const gpu::Texture& texture, GLuint id, GLTexture* originalTexture, bool transferrable) :
    GLObject(texture, id),
    _storageStamp(texture.getStamp()),
    _target(getGLTextureType(texture)),
    _maxMip(texture.maxMip()),
    _minMip(texture.minMip()),
    _virtualSize(texture.evalTotalSize()),
    _transferrable(transferrable),
    _downsampleSource(originalTexture)
{
    if (_transferrable) {
        uint16 mipCount = usedMipLevels();
        _currentMaxMipCount = std::max(_currentMaxMipCount, mipCount);
        if (!_textureCountByMips.count(mipCount)) {
            _textureCountByMips[mipCount] = 1;
        } else {
            ++_textureCountByMips[mipCount];
        }
    }
    Backend::incrementTextureGPUCount();
    Backend::updateTextureGPUVirtualMemoryUsage(0, _virtualSize);
}


// Create the texture and allocate storage
GLTexture::GLTexture(const Texture& texture, GLuint id, bool transferrable) :
    GLTexture(texture, id, nullptr, transferrable)
{
    // FIXME, do during allocation
    //Backend::updateTextureGPUMemoryUsage(0, _size);
    Backend::setGPUObject(texture, this);
}

// Create the texture and copy from the original higher resolution version
GLTexture::GLTexture(const gpu::Texture& texture, GLuint id, GLTexture* originalTexture) :
    GLTexture(texture, id, originalTexture, originalTexture->_transferrable)
{
    Q_ASSERT(_minMip >= originalTexture->_minMip);
    // Set the GPU object last because that implicitly destroys the originalTexture object
    Backend::setGPUObject(texture, this);
}

GLTexture::~GLTexture() {
    if (_transferrable) {
        uint16 mipCount = usedMipLevels();
        Q_ASSERT(_textureCountByMips.count(mipCount));
        auto& numTexturesForMipCount = _textureCountByMips[mipCount];
        --numTexturesForMipCount;
        if (0 == numTexturesForMipCount) {
            _textureCountByMips.erase(mipCount);
            if (mipCount == _currentMaxMipCount) {
                _currentMaxMipCount = _textureCountByMips.rbegin()->first;
            }
        }
    }

    Backend::decrementTextureGPUCount();
    Backend::updateTextureGPUMemoryUsage(_size, 0);
    Backend::updateTextureGPUVirtualMemoryUsage(_virtualSize, 0);
}

void GLTexture::createTexture() {
    withPreservedTexture([&] {
        allocateStorage();
        (void)CHECK_GL_ERROR();
        syncSampler();
        (void)CHECK_GL_ERROR();
    });
}

void GLTexture::setSize(GLuint size) const {
    Backend::updateTextureGPUMemoryUsage(_size, size);
    const_cast<GLuint&>(_size) = size;
}

bool GLTexture::isInvalid() const {
    return _storageStamp < _gpuObject.getStamp();
}

bool GLTexture::isOutdated() const {
    return GLSyncState::Idle == _syncState && _contentStamp < _gpuObject.getDataStamp();
}

bool GLTexture::isOverMaxMemory() const {
    // FIXME switch to using the max mip count used from the previous frame
    if (usedMipLevels() < _currentMaxMipCount) {
        return false;
    }
    Q_ASSERT(usedMipLevels() == _currentMaxMipCount);

    if (getMemoryPressure() < 1.0f) {
        return false;
    }

    return true;
}

bool GLTexture::isReady() const {
    // If we have an invalid texture, we're never ready
    if (isInvalid()) {
        return false;
    }

    // If we're out of date, but the transfer is in progress, report ready
    // as a special case
    auto syncState = _syncState.load();

    if (isOutdated()) {
        return Idle != syncState;
    }

    if (Idle != syncState) {
        return false;
    }

    return true;
}


// Do any post-transfer operations that might be required on the main context / rendering thread
void GLTexture::postTransfer() {
    setSyncState(GLSyncState::Idle);
    ++_transferCount;

    //// The public gltexture becomes available
    //_id = _privateTexture;

    _downsampleSource.reset();

    // At this point the mip pixels have been loaded, we can notify the gpu texture to abandon its memory
    switch (_gpuObject.getType()) {
        case Texture::TEX_2D:
            for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
                if (_gpuObject.isStoredMipFaceAvailable(i)) {
                    _gpuObject.notifyMipFaceGPULoaded(i);
                }
            }
            break;

        case Texture::TEX_CUBE:
            // transfer pixels from each face
            for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
                for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
                    if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
                        _gpuObject.notifyMipFaceGPULoaded(i, f);
                    }
                }
            }
            break;

        default:
            qCWarning(gpugllogging) << __FUNCTION__ << " case for Texture Type " << _gpuObject.getType() << " not supported";
            break;
    }
}

void GLTexture::initTextureTransferHelper() {
    _textureTransferHelper = std::make_shared<GLTextureTransferHelper>();
}
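The mip bookkeeping above leans on std::map being ordered: _textureCountByMips.rbegin() always names the largest mip count still in use, which is how the destructor re-derives _currentMaxMipCount. A tiny sketch of the same idiom, independent of the texture classes:

    #include <cassert>
    #include <map>

    int main() {
        std::map<unsigned short, size_t> countByMips;   // mip count -> live textures
        ++countByMips[10];
        ++countByMips[7];
        ++countByMips[10];

        // Largest key is the current max mip count, because std::map is sorted.
        assert(countByMips.rbegin()->first == 10);

        // Destroying the last 10-mip textures re-derives the max from what's left.
        countByMips[10] -= 2;
        if (countByMips[10] == 0) {
            countByMips.erase(10);
        }
        assert(countByMips.rbegin()->first == 7);
        return 0;
    }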
192	libraries/gpu-gl/src/gpu/gl/GLTexture.h	Normal file
@ -0,0 +1,192 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLTexture_h
#define hifi_gpu_gl_GLTexture_h

#include "GLShared.h"
#include "GLTextureTransfer.h"

namespace gpu { namespace gl {

struct GLFilterMode {
    GLint minFilter;
    GLint magFilter;
};

class GLTexture : public GLObject<Texture> {
public:
    static void initTextureTransferHelper();
    static std::shared_ptr<GLTextureTransferHelper> _textureTransferHelper;

    template <typename GLTextureType>
    static GLTextureType* sync(const TexturePointer& texturePointer, bool needTransfer) {
        const Texture& texture = *texturePointer;
        if (!texture.isDefined()) {
            // NO texture definition yet so let's avoid thinking
            return nullptr;
        }

        // If the object hasn't been created, or the object definition is out of date, drop and re-create
        GLTextureType* object = Backend::getGPUObject<GLTextureType>(texture);

        // Create the texture if need be (force re-creation if the storage stamp changes
        // for easier use of immutable storage)
        if (!object || object->isInvalid()) {
            // This automatically destroys any previous texture
            object = new GLTextureType(texture, needTransfer);
            if (!object->_transferrable) {
                object->createTexture();
                object->_contentStamp = texture.getDataStamp();
                object->postTransfer();
            }
        }

        // The object may not need to be transferred after creation
        if (!object->_transferrable) {
            return object;
        }

        // If we just did a transfer, return the object after doing post-transfer work
        if (GLSyncState::Transferred == object->getSyncState()) {
            object->postTransfer();
            return object;
        }

        if (object->isReady()) {
            // Do we need to reduce texture memory usage?
            if (object->isOverMaxMemory() && texturePointer->incremementMinMip()) {
                // WARNING, this code path will essentially `delete this`,
                // so no dereferencing of this instance should be done past this point
                object = new GLTextureType(texture, object);
                _textureTransferHelper->transferTexture(texturePointer);
            }
        } else if (object->isOutdated()) {
            // Object might be outdated, if so, start the transfer
            // (outdated objects that are already in transfer will have reported 'true' for ready())
            _textureTransferHelper->transferTexture(texturePointer);
        }

        return object;
    }

    template <typename GLTextureType>
    static GLuint getId(const TexturePointer& texture, bool shouldSync) {
        if (!texture) {
            return 0;
        }
        GLTextureType* object { nullptr };
        if (shouldSync) {
            object = sync<GLTextureType>(texture, shouldSync);
        } else {
            object = Backend::getGPUObject<GLTextureType>(*texture);
        }
        if (!object) {
            return 0;
        }

        GLuint result = object->_id;

        // Don't return textures that are in transfer state
        if ((object->getSyncState() != GLSyncState::Idle) ||
            // Don't return transferrable textures that have never completed transfer
            (!object->_transferrable || 0 != object->_transferCount)) {
            // Will be either 0 or the original texture being downsampled.
            result = object->_downsampleSource._texture;
        }

        return result;
    }

    ~GLTexture();

    const GLuint& _texture { _id };
    const Stamp _storageStamp;
    const GLenum _target;
    const uint16 _maxMip;
    const uint16 _minMip;
    const GLuint _virtualSize; // theoretical size as expected
    Stamp _contentStamp { 0 };
    const bool _transferrable;
    Size _transferCount { 0 };

    struct DownsampleSource {
        using Pointer = std::shared_ptr<DownsampleSource>;
        DownsampleSource() : _texture(0), _minMip(0), _maxMip(0) {}
        DownsampleSource(GLTexture* originalTexture);
        ~DownsampleSource();
        void reset() const { const_cast<GLuint&>(_texture) = 0; }
        const GLuint _texture { 0 };
        const uint16 _minMip { 0 };
        const uint16 _maxMip { 0 };
    } _downsampleSource;

    GLuint size() const { return _size; }
    GLSyncState getSyncState() const { return _syncState; }

    // Is the storage out of date relative to the gpu texture?
    bool isInvalid() const;

    // Is the content out of date relative to the gpu texture?
    bool isOutdated() const;

    // Is the texture in a state where it can be rendered with no work?
    bool isReady() const;

    // Execute any post-move operations that must occur only on the main thread
    void postTransfer();

    bool isOverMaxMemory() const;

protected:
    static const size_t CUBE_NUM_FACES = 6;
    static const GLenum CUBE_FACE_LAYOUT[6];
    static const GLFilterMode FILTER_MODES[Sampler::NUM_FILTERS];
    static const GLenum WRAP_MODES[Sampler::NUM_WRAP_MODES];

    static const std::vector<GLenum>& getFaceTargets(GLenum textureType);

    static GLenum getGLTextureType(const Texture& texture);
    // Return a floating point value indicating how much of the allowed
    // texture memory we are currently consuming. A value of 0 indicates
    // no texture memory usage, while a value of 1 indicates all available / allowed memory
    // is consumed. A value above 1 indicates that there is a problem.
    static float getMemoryPressure();


    const GLuint _size { 0 }; // true size as reported by the gl api
    std::atomic<GLSyncState> _syncState { GLSyncState::Idle };

    GLTexture(const Texture& texture, GLuint id, bool transferrable);
    GLTexture(const Texture& texture, GLuint id, GLTexture* originalTexture);

    void setSyncState(GLSyncState syncState) { _syncState = syncState; }
    uint16 usedMipLevels() const { return (_maxMip - _minMip) + 1; }

    void createTexture();

    virtual void allocateStorage() const = 0;
    virtual void updateSize() const = 0;
    virtual void transfer() const = 0;
    virtual void syncSampler() const = 0;
    virtual void generateMips() const = 0;
    virtual void withPreservedTexture(std::function<void()> f) const = 0;

protected:
    void setSize(GLuint size) const;

private:
    GLTexture(const gpu::Texture& gpuTexture, GLuint id, GLTexture* originalTexture, bool transferrable);

    friend class GLTextureTransferHelper;
    friend class GLBackend;
};

} }

#endif
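Reading this header together with GLTexture.cpp and the transfer helper below, the intended lifecycle can be summarized as follows; this is a reading of the code above, not new API:

    // Lifecycle summary (derived from sync<>, transferTexture and postTransfer):
    //   1. sync<T>() creates the GLTexture; non-transferrable objects are created,
    //      stamped and postTransfer()'d immediately.
    //   2. For transferrable objects, transferTexture() sets _syncState = Pending
    //      and queues a TextureTransferPackage.
    //   3. The transfer thread runs createTexture(); transfer(); updateSize();
    //      and sets _syncState = Transferred.
    //   4. On the next render-thread sync<T>(), Transferred triggers postTransfer(),
    //      which resets _syncState to Idle and drops the downsample source.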
@ -5,21 +5,18 @@
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendTextureTransfer.h"
#include "GLTextureTransfer.h"

#ifdef THREADED_TEXTURE_TRANSFER
#include <gl/OffscreenGLCanvas.h>
#include <gl/QOpenGLContextWrapper.h>
#endif


#include "GLBackendShared.h"
#include "GLShared.h"

using namespace gpu;
using namespace gpu::gl;

#include "GLBackend.h"

GLTextureTransferHelper::GLTextureTransferHelper() {
#ifdef THREADED_TEXTURE_TRANSFER
    _canvas = QSharedPointer<OffscreenGLCanvas>(new OffscreenGLCanvas(), &QObject::deleteLater);

@ -45,7 +42,7 @@ GLTextureTransferHelper::~GLTextureTransferHelper() {
}

void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texturePointer) {
    GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
    GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
    Backend::incrementTextureGPUTransferCount();
#ifdef THREADED_TEXTURE_TRANSFER
    GLsync fence { 0 };

@ -53,14 +50,14 @@ void GLTextureTransferHelper::transferTexture(const gpu::TexturePointer& texture
    //glFlush();

    TextureTransferPackage package { texturePointer, fence };
    object->setSyncState(GLBackend::GLTexture::Pending);
    object->setSyncState(GLSyncState::Pending);
    queueItem(package);
#else
    object->withPreservedTexture([&] {
        do_transfer(*object);
    });
    object->_contentStamp = texturePointer->getDataStamp();
    object->setSyncState(GLBackend::GLTexture::Transferred);
    object->setSyncState(GLSyncState::Transferred);
#endif
}
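The fence captured in transferTexture is how the render thread's GL writes are made visible to the transfer thread. A minimal sketch of that standard fence-handoff idiom, assuming two threads with shared GL contexts; this mirrors the fence handling visible in processQueueItems below:

    // Producer (render) thread: publish work, then a fence.
    GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    glFlush();  // ensure the fence command itself reaches the GL server

    // Consumer (transfer) thread: block until the producer's commands complete.
    glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT, GL_TIMEOUT_IGNORED);
    glDeleteSync(fence);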
@ -78,7 +75,7 @@ void GLTextureTransferHelper::shutdown() {
#endif
}

void GLTextureTransferHelper::do_transfer(GLBackend::GLTexture& texture) {
void GLTextureTransferHelper::do_transfer(GLTexture& texture) {
    texture.createTexture();
    texture.transfer();
    texture.updateSize();

@ -99,7 +96,7 @@ bool GLTextureTransferHelper::processQueueItems(const Queue& messages) {
        package.fence = 0;
    }

    GLBackend::GLTexture* object = Backend::getGPUObject<GLBackend::GLTexture>(*texturePointer);
    GLTexture* object = Backend::getGPUObject<GLTexture>(*texturePointer);
    do_transfer(*object);
    glBindTexture(object->_target, 0);


@ -108,7 +105,7 @@ bool GLTextureTransferHelper::processQueueItems(const Queue& messages) {
    glDeleteSync(writeSync);

    object->_contentStamp = texturePointer->getDataStamp();
    object->setSyncState(GLBackend::GLTexture::Transferred);
    object->setSyncState(GLSyncState::Transferred);
    }
    return true;
}

@ -5,12 +5,16 @@
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_gl_GLTextureTransfer_h
#define hifi_gpu_gl_GLTextureTransfer_h

#include <QtGlobal>
#include <QSharedPointer>
#include <QtCore/QSharedPointer>

#include <GenericQueueThread.h>
#include "GLBackendShared.h"
#include "GLBackend.h"

#include "GLShared.h"
#include "GLTexture.h"

#ifdef Q_OS_WIN
#define THREADED_TEXTURE_TRANSFER

@ -27,6 +31,7 @@ struct TextureTransferPackage {

class GLTextureTransferHelper : public GenericQueueThread<TextureTransferPackage> {
public:
    using Pointer = std::shared_ptr<GLTextureTransferHelper>;
    GLTextureTransferHelper();
    ~GLTextureTransferHelper();
    void transferTexture(const gpu::TexturePointer& texturePointer);

@ -36,10 +41,12 @@ protected:
    void setup() override;
    void shutdown() override;
    bool processQueueItems(const Queue& messages) override;
    void do_transfer(GLBackend::GLTexture& texturePointer);
    void do_transfer(GLTexture& texturePointer);

private:
    QSharedPointer<OffscreenGLCanvas> _canvas;
};

} }

#endif
175	libraries/gpu-gl/src/gpu/gl41/GLBackend.cpp	Normal file
@ -0,0 +1,175 @@
//
//  Created by Sam Gateau on 10/27/2014.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

#include <mutex>
#include <queue>
#include <list>
#include <functional>
#include <glm/gtc/type_ptr.hpp>

Q_LOGGING_CATEGORY(gpugl41logging, "hifi.gpu.gl41")

using namespace gpu;
using namespace gpu::gl41;

void GLBackend::do_draw(Batch& batch, size_t paramOffset) {
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
    uint32 numVertices = batch._params[paramOffset + 1]._uint;
    uint32 startVertex = batch._params[paramOffset + 0]._uint;

    if (isStereo()) {
        setupStereoSide(0);
        glDrawArrays(mode, startVertex, numVertices);
        setupStereoSide(1);
        glDrawArrays(mode, startVertex, numVertices);

        _stats._DSNumTriangles += 2 * numVertices / 3;
        _stats._DSNumDrawcalls += 2;

    } else {
        glDrawArrays(mode, startVertex, numVertices);
        _stats._DSNumTriangles += numVertices / 3;
        _stats._DSNumDrawcalls++;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void GLBackend::do_drawIndexed(Batch& batch, size_t paramOffset) {
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
    uint32 numIndices = batch._params[paramOffset + 1]._uint;
    uint32 startIndex = batch._params[paramOffset + 0]._uint;

    GLenum glType = gl::ELEMENT_TYPE_TO_GL[_input._indexBufferType];

    auto typeByteSize = TYPE_SIZE[_input._indexBufferType];
    GLvoid* indexBufferByteOffset = reinterpret_cast<GLvoid*>(startIndex * typeByteSize + _input._indexBufferOffset);

    if (isStereo()) {
        setupStereoSide(0);
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);
        setupStereoSide(1);
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);

        _stats._DSNumTriangles += 2 * numIndices / 3;
        _stats._DSNumDrawcalls += 2;
    } else {
        glDrawElements(mode, numIndices, glType, indexBufferByteOffset);
        _stats._DSNumTriangles += numIndices / 3;
        _stats._DSNumDrawcalls++;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void GLBackend::do_drawInstanced(Batch& batch, size_t paramOffset) {
    GLint numInstances = batch._params[paramOffset + 4]._uint;
    Primitive primitiveType = (Primitive)batch._params[paramOffset + 3]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[primitiveType];
    uint32 numVertices = batch._params[paramOffset + 2]._uint;
    uint32 startVertex = batch._params[paramOffset + 1]._uint;


    if (isStereo()) {
        GLint trueNumInstances = 2 * numInstances;

        setupStereoSide(0);
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);
        setupStereoSide(1);
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);

        _stats._DSNumTriangles += (trueNumInstances * numVertices) / 3;
        _stats._DSNumDrawcalls += trueNumInstances;
    } else {
        glDrawArraysInstancedARB(mode, startVertex, numVertices, numInstances);
        _stats._DSNumTriangles += (numInstances * numVertices) / 3;
        _stats._DSNumDrawcalls += numInstances;
    }
    _stats._DSNumAPIDrawcalls++;

    (void) CHECK_GL_ERROR();
}

void glbackend_glDrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei primcount, GLint basevertex, GLuint baseinstance) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    glDrawElementsInstancedBaseVertexBaseInstance(mode, count, type, indices, primcount, basevertex, baseinstance);
#else
    glDrawElementsInstanced(mode, count, type, indices, primcount);
#endif
}
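The shim above is this file's general trick for entry points that GL 4.1 lacks: compile the real call where GPU_INPUT_PROFILE allows it, otherwise fall back to a degraded variant. The same pattern, sketched for another 4.2+ entry point; this wrapper is hypothetical and not part of the diff:

    // Hypothetical wrapper following the same compile-time fallback pattern.
    void glbackend_glDrawArraysInstancedBaseInstance(GLenum mode, GLint first, GLsizei count,
                                                     GLsizei primcount, GLuint baseinstance) {
    #if (GPU_INPUT_PROFILE == GPU_CORE_43)
        glDrawArraysInstancedBaseInstance(mode, first, count, primcount, baseinstance);
    #else
        // The fallback silently ignores baseinstance, mirroring the shim above.
        glDrawArraysInstancedARB(mode, first, count, primcount);
    #endif
    }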
void GLBackend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
    GLint numInstances = batch._params[paramOffset + 4]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 3]._uint];
    uint32 numIndices = batch._params[paramOffset + 2]._uint;
    uint32 startIndex = batch._params[paramOffset + 1]._uint;
    // FIXME glDrawElementsInstancedBaseVertexBaseInstance is only available in GL 4.3
    // and higher, so currently we ignore this field
    uint32 startInstance = batch._params[paramOffset + 0]._uint;
    GLenum glType = gl::ELEMENT_TYPE_TO_GL[_input._indexBufferType];

    auto typeByteSize = TYPE_SIZE[_input._indexBufferType];
    GLvoid* indexBufferByteOffset = reinterpret_cast<GLvoid*>(startIndex * typeByteSize + _input._indexBufferOffset);

    if (isStereo()) {
        GLint trueNumInstances = 2 * numInstances;

        setupStereoSide(0);
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);
        setupStereoSide(1);
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);

        _stats._DSNumTriangles += (trueNumInstances * numIndices) / 3;
        _stats._DSNumDrawcalls += trueNumInstances;
    } else {
        glbackend_glDrawElementsInstancedBaseVertexBaseInstance(mode, numIndices, glType, indexBufferByteOffset, numInstances, 0, startInstance);
        _stats._DSNumTriangles += (numInstances * numIndices) / 3;
        _stats._DSNumDrawcalls += numInstances;
    }

    _stats._DSNumAPIDrawcalls++;

    (void)CHECK_GL_ERROR();
}


void GLBackend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    uint commandCount = batch._params[paramOffset + 0]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];

    glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
    _stats._DSNumDrawcalls += commandCount;
    _stats._DSNumAPIDrawcalls++;

#else
    // FIXME implement the slow path
#endif
    (void)CHECK_GL_ERROR();

}

void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
    uint commandCount = batch._params[paramOffset + 0]._uint;
    GLenum mode = gl::PRIMITIVE_TO_GL[(Primitive)batch._params[paramOffset + 1]._uint];
    GLenum indexType = gl::ELEMENT_TYPE_TO_GL[_input._indexBufferType];

    glMultiDrawElementsIndirect(mode, indexType, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
    _stats._DSNumDrawcalls += commandCount;
    _stats._DSNumAPIDrawcalls++;
#else
    // FIXME implement the slow path
#endif
    (void)CHECK_GL_ERROR();
}
96	libraries/gpu-gl/src/gpu/gl41/GLBackend.h	Normal file
@ -0,0 +1,96 @@
//
//  GLBackend.h
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 10/27/2014.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_41_GLBackend_h
#define hifi_gpu_41_GLBackend_h

#include <gl/Config.h>

#include "../gl/GLBackend.h"
#include "../gl/GLTexture.h"

#define GPU_CORE_41 410
#define GPU_CORE_43 430

#ifdef Q_OS_MAC
#define GPU_INPUT_PROFILE GPU_CORE_41
#else
#define GPU_INPUT_PROFILE GPU_CORE_43
#endif

namespace gpu { namespace gl41 {

class GLBackend : public gl::GLBackend {
    using Parent = gl::GLBackend;
    // Context Backend static interface required
    friend class Context;

public:
    explicit GLBackend(bool syncCache) : Parent(syncCache) {}
    GLBackend() : Parent() {}

    class GLTexture : public gpu::gl::GLTexture {
        using Parent = gpu::gl::GLTexture;
        GLuint allocate();
    public:
        GLTexture(const Texture& buffer, bool transferrable);
        GLTexture(const Texture& buffer, GLTexture* original);

    protected:
        void transferMip(uint16_t mipLevel, uint8_t face = 0) const;
        void allocateStorage() const override;
        void updateSize() const override;
        void transfer() const override;
        void syncSampler() const override;
        void generateMips() const override;
        void withPreservedTexture(std::function<void()> f) const override;
    };


protected:
    GLuint getFramebufferID(const FramebufferPointer& framebuffer) override;
    gl::GLFramebuffer* syncGPUObject(const Framebuffer& framebuffer) override;

    GLuint getBufferID(const Buffer& buffer) override;
    gl::GLBuffer* syncGPUObject(const Buffer& buffer) override;

    GLuint getTextureID(const TexturePointer& texture, bool needTransfer = true) override;
    gl::GLTexture* syncGPUObject(const TexturePointer& texture, bool sync = true) override;

    GLuint getQueryID(const QueryPointer& query) override;
    gl::GLQuery* syncGPUObject(const Query& query) override;

    // Draw Stage
    void do_draw(Batch& batch, size_t paramOffset) override;
    void do_drawIndexed(Batch& batch, size_t paramOffset) override;
    void do_drawInstanced(Batch& batch, size_t paramOffset) override;
    void do_drawIndexedInstanced(Batch& batch, size_t paramOffset) override;
    void do_multiDrawIndirect(Batch& batch, size_t paramOffset) override;
    void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) override;

    // Input Stage
    void updateInput() override;

    // Synchronize the state cache of this Backend with the actual real state of the GL Context
    void transferTransformState(const Batch& batch) const override;
    void initTransform() override;
    void updateTransform(const Batch& batch);
    void resetTransformStage();

    // Output stage
    void do_blit(Batch& batch, size_t paramOffset) override;
};

} }

Q_DECLARE_LOGGING_CATEGORY(gpugl41logging)


#endif
62	libraries/gpu-gl/src/gpu/gl41/GLBackendBuffer.cpp	Normal file
@ -0,0 +1,62 @@
//
//  Created by Bradley Austin Davis on 2016/05/15
//  Copyright 2013-2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"
#include "../gl/GLBuffer.h"

using namespace gpu;
using namespace gpu::gl41;

class GLBuffer : public gl::GLBuffer {
    using Parent = gpu::gl::GLBuffer;
    static GLuint allocate() {
        GLuint result;
        glGenBuffers(1, &result);
        return result;
    }

public:
    GLBuffer(const Buffer& buffer, GLBuffer* original) : Parent(buffer, allocate()) {
        glBindBuffer(GL_ARRAY_BUFFER, _buffer);
        glBufferData(GL_ARRAY_BUFFER, _size, nullptr, GL_DYNAMIC_DRAW);
        glBindBuffer(GL_ARRAY_BUFFER, 0);

        if (original) {
            glBindBuffer(GL_COPY_WRITE_BUFFER, _buffer);
            glBindBuffer(GL_COPY_READ_BUFFER, original->_buffer);
            glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, original->_size);
            glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
            glBindBuffer(GL_COPY_READ_BUFFER, 0);
            (void)CHECK_GL_ERROR();
        }
        Backend::setGPUObject(buffer, this);
    }

    void transfer() override {
        glBindBuffer(GL_ARRAY_BUFFER, _buffer);
        (void)CHECK_GL_ERROR();
        Size offset;
        Size size;
        Size currentPage { 0 };
        auto data = _gpuObject.getSysmem().readData();
        while (_gpuObject.getNextTransferBlock(offset, size, currentPage)) {
            glBufferSubData(GL_ARRAY_BUFFER, offset, size, data + offset);
            (void)CHECK_GL_ERROR();
        }
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        (void)CHECK_GL_ERROR();
        _gpuObject._flags &= ~Buffer::DIRTY;
    }
};

GLuint GLBackend::getBufferID(const Buffer& buffer) {
    return GLBuffer::getId<GLBuffer>(buffer);
}

gl::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
    return GLBuffer::sync<GLBuffer>(buffer);
}
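The transfer() override above streams only dirty pages of the CPU-side buffer. As a standalone illustration of the same partial-update idea, here is a sketch where an explicit dirty-range list stands in for Buffer::getNextTransferBlock (that list is an assumption for this example, not real API):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Upload only the byte ranges that changed; full re-uploads waste bus bandwidth.
    void uploadDirtyRanges(GLuint vbo, const uint8_t* data,
                           const std::vector<std::pair<GLintptr, GLsizeiptr>>& dirtyRanges) {
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        for (const auto& range : dirtyRanges) {
            glBufferSubData(GL_ARRAY_BUFFER, range.first, range.second, data + range.first);
        }
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }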
185	libraries/gpu-gl/src/gpu/gl41/GLBackendInput.cpp	Normal file
@ -0,0 +1,185 @@
//
//  GLBackendInput.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 3/8/2015.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl41;

// Core 41 doesn't expose the features to really separate the vertex format from the vertex buffers binding
// Core 43 does :)
// FIXME crashing problem with glVertexBindingDivisor / glVertexAttribFormat
#if 1 || (GPU_INPUT_PROFILE == GPU_CORE_41)
#define NO_SUPPORT_VERTEX_ATTRIB_FORMAT
#else
#define SUPPORT_VERTEX_ATTRIB_FORMAT
#endif

void GLBackend::updateInput() {
#if defined(SUPPORT_VERTEX_ATTRIB_FORMAT)
    if (_input._invalidFormat) {

        InputStageState::ActivationCache newActivation;

        // Assign the vertex format required
        if (_input._format) {
            for (auto& it : _input._format->getAttributes()) {
                const Stream::Attribute& attrib = (it).second;

                GLuint slot = attrib._slot;
                GLuint count = attrib._element.getLocationScalarCount();
                uint8_t locationCount = attrib._element.getLocationCount();
                GLenum type = _elementTypeToGLType[attrib._element.getType()];
                GLuint offset = attrib._offset;
                GLboolean isNormalized = attrib._element.isNormalized();

                GLenum perLocationSize = attrib._element.getLocationSize();

                for (size_t locNum = 0; locNum < locationCount; ++locNum) {
                    newActivation.set(slot + locNum);
                    glVertexAttribFormat(slot + locNum, count, type, isNormalized, offset + locNum * perLocationSize);
                    glVertexAttribBinding(slot + locNum, attrib._channel);
                }
                glVertexBindingDivisor(attrib._channel, attrib._frequency);
            }
            (void) CHECK_GL_ERROR();
        }

        // Reconcile activation: compare what was active with what is expected now
        for (size_t i = 0; i < newActivation.size(); i++) {
            bool newState = newActivation[i];
            if (newState != _input._attributeActivation[i]) {
                if (newState) {
                    glEnableVertexAttribArray(i);
                } else {
                    glDisableVertexAttribArray(i);
                }
                _input._attributeActivation.flip(i);
            }
        }
        (void) CHECK_GL_ERROR();

        _input._invalidFormat = false;
        _stats._ISNumFormatChanges++;
    }

    if (_input._invalidBuffers.any()) {
        int numBuffers = _input._buffers.size();
        auto buffer = _input._buffers.data();
        auto vbo = _input._bufferVBOs.data();
        auto offset = _input._bufferOffsets.data();
        auto stride = _input._bufferStrides.data();

        for (int bufferNum = 0; bufferNum < numBuffers; bufferNum++) {
            if (_input._invalidBuffers.test(bufferNum)) {
                glBindVertexBuffer(bufferNum, (*vbo), (*offset), (*stride));
            }
            buffer++;
            vbo++;
            offset++;
            stride++;
        }
        _input._invalidBuffers.reset();
        (void) CHECK_GL_ERROR();
    }
#else
    if (_input._invalidFormat || _input._invalidBuffers.any()) {

        if (_input._invalidFormat) {
            InputStageState::ActivationCache newActivation;

            _stats._ISNumFormatChanges++;

            // Check expected activation
            if (_input._format) {
                for (auto& it : _input._format->getAttributes()) {
                    const Stream::Attribute& attrib = (it).second;
                    uint8_t locationCount = attrib._element.getLocationCount();
                    for (int i = 0; i < locationCount; ++i) {
                        newActivation.set(attrib._slot + i);
                    }
                }
            }

            // Reconcile activation: compare what was active with what is expected now
            for (unsigned int i = 0; i < newActivation.size(); i++) {
                bool newState = newActivation[i];
                if (newState != _input._attributeActivation[i]) {

                    if (newState) {
                        glEnableVertexAttribArray(i);
                    } else {
                        glDisableVertexAttribArray(i);
                    }
                    (void) CHECK_GL_ERROR();

                    _input._attributeActivation.flip(i);
                }
            }
        }

        // now we need to bind the buffers and assign the attrib pointers
        if (_input._format) {
            const Buffers& buffers = _input._buffers;
            const Offsets& offsets = _input._bufferOffsets;
            const Offsets& strides = _input._bufferStrides;

            const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
            auto& inputChannels = _input._format->getChannels();
            _stats._ISNumInputBufferChanges++;

            GLuint boundVBO = 0;
            for (auto& channelIt : inputChannels) {
                const Stream::Format::ChannelMap::value_type::second_type& channel = (channelIt).second;
                if ((channelIt).first < buffers.size()) {
                    int bufferNum = (channelIt).first;

                    if (_input._invalidBuffers.test(bufferNum) || _input._invalidFormat) {
                        // GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
                        GLuint vbo = _input._bufferVBOs[bufferNum];
                        if (boundVBO != vbo) {
                            glBindBuffer(GL_ARRAY_BUFFER, vbo);
                            (void) CHECK_GL_ERROR();
                            boundVBO = vbo;
                        }
                        _input._invalidBuffers[bufferNum] = false;

                        for (unsigned int i = 0; i < channel._slots.size(); i++) {
                            const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
                            GLuint slot = attrib._slot;
                            GLuint count = attrib._element.getLocationScalarCount();
                            uint8_t locationCount = attrib._element.getLocationCount();
                            GLenum type = gl::ELEMENT_TYPE_TO_GL[attrib._element.getType()];
                            // GLenum perLocationStride = strides[bufferNum];
                            GLenum perLocationStride = attrib._element.getLocationSize();
                            GLuint stride = (GLuint)strides[bufferNum];
                            GLuint pointer = (GLuint)(attrib._offset + offsets[bufferNum]);
                            GLboolean isNormalized = attrib._element.isNormalized();

                            for (size_t locNum = 0; locNum < locationCount; ++locNum) {
                                glVertexAttribPointer(slot + (GLuint)locNum, count, type, isNormalized, stride,
                                    reinterpret_cast<GLvoid*>(pointer + perLocationStride * (GLuint)locNum));
                                glVertexAttribDivisor(slot + (GLuint)locNum, attrib._frequency);
                            }

                            // TODO: Support properly the IAttrib version

                            (void) CHECK_GL_ERROR();
                        }
                    }
                }
            }
        }
        // everything format related should be in sync now
        _input._invalidFormat = false;
    }
#endif
}
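The two #if branches above are the heart of the 4.1/4.3 split this commit is preparing: GL 4.3 can declare the vertex format once and rebind buffers cheaply, while GL 4.1 welds layout and buffer together and must respecify attribute pointers whenever a buffer changes. Side by side, for one attribute (3 floats at offset 0, attribute and binding index 0, with a hypothetical vbo handle):

    // GL 4.3 path: format declared once, per-frame work is just the buffer bind.
    glVertexAttribFormat(0, 3, GL_FLOAT, GL_FALSE, 0);   // layout only
    glVertexAttribBinding(0, 0);                         // attribute 0 -> binding 0
    glBindVertexBuffer(0, vbo, 0, sizeof(float) * 3);    // cheap to repeat per frame

    // GL 4.1 path: layout and buffer specified together; both must be
    // re-issued whenever the bound buffer changes.
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 3, (GLvoid*)0);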
169	libraries/gpu-gl/src/gpu/gl41/GLBackendOutput.cpp	Normal file
@ -0,0 +1,169 @@
//
|
||||
// GLBackendTexture.cpp
|
||||
// libraries/gpu/src/gpu
|
||||
//
|
||||
// Created by Sam Gateau on 1/19/2015.
|
||||
// Copyright 2014 High Fidelity, Inc.
|
||||
//
|
||||
// Distributed under the Apache License, Version 2.0.
|
||||
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
//
|
||||
#include "GLBackend.h"
|
||||
|
||||
#include <QtGui/QImage>
|
||||
|
||||
#include "../gl/GLFramebuffer.h"
|
||||
#include "../gl/GLTexture.h"
|
||||
|
||||
namespace gpu { namespace gl41 {
|
||||
|
||||
class GLFramebuffer : public gl::GLFramebuffer {
|
||||
using Parent = gl::GLFramebuffer;
|
||||
static GLuint allocate() {
|
||||
GLuint result;
|
||||
glGenFramebuffers(1, &result);
|
||||
return result;
|
||||
}
|
||||
public:
|
||||
void update() override {
|
||||
GLint currentFBO = -1;
|
||||
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, ¤tFBO);
|
||||
glBindFramebuffer(GL_FRAMEBUFFER, _fbo);
|
||||
gl::GLTexture* gltexture = nullptr;
|
||||
TexturePointer surface;
|
||||
if (_gpuObject.getColorStamps() != _colorStamps) {
|
||||
if (_gpuObject.hasColor()) {
|
||||
_colorBuffers.clear();
|
||||
static const GLenum colorAttachments[] = {
|
||||
GL_COLOR_ATTACHMENT0,
|
||||
GL_COLOR_ATTACHMENT1,
|
||||
GL_COLOR_ATTACHMENT2,
|
||||
GL_COLOR_ATTACHMENT3,
|
||||
GL_COLOR_ATTACHMENT4,
|
||||
GL_COLOR_ATTACHMENT5,
|
||||
GL_COLOR_ATTACHMENT6,
|
||||
GL_COLOR_ATTACHMENT7,
|
||||
GL_COLOR_ATTACHMENT8,
|
||||
GL_COLOR_ATTACHMENT9,
|
||||
GL_COLOR_ATTACHMENT10,
|
||||
GL_COLOR_ATTACHMENT11,
|
||||
GL_COLOR_ATTACHMENT12,
|
||||
GL_COLOR_ATTACHMENT13,
|
||||
GL_COLOR_ATTACHMENT14,
|
||||
GL_COLOR_ATTACHMENT15 };
|
||||
|
||||
int unit = 0;
|
||||
for (auto& b : _gpuObject.getRenderBuffers()) {
|
||||
surface = b._texture;
|
||||
if (surface) {
|
||||
gltexture = gl::GLTexture::sync<GLBackend::GLTexture>(surface, false); // Grab the gltexture and don't transfer
|
||||
} else {
|
||||
gltexture = nullptr;
|
||||
}
|
||||
|
||||
if (gltexture) {
|
||||
glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachments[unit], GL_TEXTURE_2D, gltexture->_texture, 0);
|
||||
_colorBuffers.push_back(colorAttachments[unit]);
|
||||
} else {
|
||||
glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachments[unit], GL_TEXTURE_2D, 0, 0);
|
||||
}
|
||||
unit++;
|
||||
}
|
||||
}
|
||||
_colorStamps = _gpuObject.getColorStamps();
|
||||
}
|
||||
|
||||
GLenum attachement = GL_DEPTH_STENCIL_ATTACHMENT;
|
||||
if (!_gpuObject.hasStencil()) {
|
||||
attachement = GL_DEPTH_ATTACHMENT;
|
||||
} else if (!_gpuObject.hasDepth()) {
|
||||
attachement = GL_STENCIL_ATTACHMENT;
|
||||
}
|
||||
|
||||
if (_gpuObject.getDepthStamp() != _depthStamp) {
|
||||
auto surface = _gpuObject.getDepthStencilBuffer();
|
||||
if (_gpuObject.hasDepthStencil() && surface) {
|
||||
gltexture = gl::GLTexture::sync<GLBackend::GLTexture>(surface, false); // Grab the gltexture and don't transfer
|
||||
}
|
||||
|
||||
if (gltexture) {
|
||||
glFramebufferTexture2D(GL_FRAMEBUFFER, attachement, GL_TEXTURE_2D, gltexture->_texture, 0);
|
||||
} else {
|
||||
glFramebufferTexture2D(GL_FRAMEBUFFER, attachement, GL_TEXTURE_2D, 0, 0);
|
||||
}
|
||||
_depthStamp = _gpuObject.getDepthStamp();
|
||||
}
|
||||
|
||||
|
||||
// Last but not least, define where we draw
|
||||
if (!_colorBuffers.empty()) {
|
||||
glDrawBuffers((GLsizei)_colorBuffers.size(), _colorBuffers.data());
|
||||
} else {
|
||||
glDrawBuffer(GL_NONE);
|
||||
}
|
||||
|
||||
// Now check for completness
|
||||
_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
|
||||
|
||||
// restore the current framebuffer
|
||||
if (currentFBO != -1) {
|
||||
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, currentFBO);
|
||||
}
|
||||
|
||||
checkStatus(GL_DRAW_FRAMEBUFFER);
|
||||
}
|
||||
|
||||
|
||||
public:
|
||||
GLFramebuffer(const gpu::Framebuffer& framebuffer)
|
||||
: Parent(framebuffer, allocate()) { }
|
||||
};
|
||||
|
||||
gl::GLFramebuffer* GLBackend::syncGPUObject(const Framebuffer& framebuffer) {
|
||||
return GLFramebuffer::sync<GLFramebuffer>(framebuffer);
|
||||
}
|
||||
|
||||
GLuint GLBackend::getFramebufferID(const FramebufferPointer& framebuffer) {
|
||||
return framebuffer ? GLFramebuffer::getId<GLFramebuffer>(*framebuffer) : 0;
|
||||
}
|

void GLBackend::do_blit(Batch& batch, size_t paramOffset) {
    auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
    Vec4i srcvp;
    for (auto i = 0; i < 4; ++i) {
        srcvp[i] = batch._params[paramOffset + 1 + i]._int;
    }

    auto dstframebuffer = batch._framebuffers.get(batch._params[paramOffset + 5]._uint);
    Vec4i dstvp;
    for (auto i = 0; i < 4; ++i) {
        dstvp[i] = batch._params[paramOffset + 6 + i]._int;
    }

    // Bind the destination framebuffer if it isn't bound already
    auto newDrawFBO = getFramebufferID(dstframebuffer);
    if (_output._drawFBO != newDrawFBO) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, newDrawFBO);
    }

    // Always bind the read fbo
    glBindFramebuffer(GL_READ_FRAMEBUFFER, getFramebufferID(srcframebuffer));

    // Blit!
    glBlitFramebuffer(srcvp.x, srcvp.y, srcvp.z, srcvp.w,
        dstvp.x, dstvp.y, dstvp.z, dstvp.w,
        GL_COLOR_BUFFER_BIT, GL_LINEAR);

    // Always reset the read fbo to 0
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

    // Restore the draw fbo if it was changed
    if (_output._drawFBO != newDrawFBO) {
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _output._drawFBO);
    }

    (void) CHECK_GL_ERROR();
}

} }
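For context, a sketch of how a blit is recorded on the client side, assuming a
gpu::Batch::blit(src, srcViewport, dst, dstViewport) signature matching what do_blit
decodes above (the exact parameter order is an assumption):

    gpu::Vec4i viewport(0, 0, width, height);
    batch.blit(srcFramebuffer, viewport, dstFramebuffer, viewport);
    // replayed later by the backend as do_blit(batch, paramOffset)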
37  libraries/gpu-gl/src/gpu/gl41/GLBackendQuery.cpp  Normal file

@ -0,0 +1,37 @@
//
//  GLBackendQuery.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 7/7/2015.
//  Copyright 2015 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

#include "../gl/GLQuery.h"

using namespace gpu;
using namespace gpu::gl41;

class GLQuery : public gpu::gl::GLQuery {
    using Parent = gpu::gl::GLQuery;
public:
    static GLuint allocateQuery() {
        GLuint result;
        glGenQueries(1, &result);
        return result;
    }

    GLQuery(const Query& query)
        : gl::GLQuery(query, allocateQuery()) { }
};

gl::GLQuery* GLBackend::syncGPUObject(const Query& query) {
    return GLQuery::sync<GLQuery>(query);
}

GLuint GLBackend::getQueryID(const QueryPointer& query) {
    return GLQuery::getId<GLQuery>(query);
}
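As background, the ids handed out here plug into the standard GL query protocol; a minimal
sketch in plain GL (not this backend's replay code):

    GLuint q = GLBackend::getQueryID(queryPointer); // allocates and caches on first use
    glBeginQuery(GL_TIME_ELAPSED, q);
    // ... draw work ...
    glEndQuery(GL_TIME_ELAPSED);
    GLuint64 elapsedNs = 0;
    glGetQueryObjectui64v(q, GL_QUERY_RESULT, &elapsedNs); // waits for the result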
237  libraries/gpu-gl/src/gpu/gl41/GLBackendTexture.cpp  Normal file

@ -0,0 +1,237 @@
//
//  GLBackendTexture.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 1/19/2015.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

#include <unordered_set>
#include <unordered_map>
#include <QtCore/QThread>

#include "../gl/GLTexelFormat.h"

using namespace gpu;
using namespace gpu::gl41;

using GLTexelFormat = gl::GLTexelFormat;
using GLTexture = GLBackend::GLTexture;

GLuint GLTexture::allocate() {
    Backend::incrementTextureGPUCount();
    GLuint result;
    glGenTextures(1, &result);
    return result;
}

GLuint GLBackend::getTextureID(const TexturePointer& texture, bool transfer) {
    return GLTexture::getId<GLTexture>(texture, transfer);
}

gl::GLTexture* GLBackend::syncGPUObject(const TexturePointer& texture, bool transfer) {
    return GLTexture::sync<GLTexture>(texture, transfer);
}

GLTexture::GLTexture(const Texture& texture, bool transferrable) : gl::GLTexture(texture, allocate(), transferrable) {}

GLTexture::GLTexture(const Texture& texture, GLTexture* original) : gl::GLTexture(texture, allocate(), original) {}

void GLBackend::GLTexture::withPreservedTexture(std::function<void()> f) const {
    GLint boundTex = -1;
    switch (_target) {
        case GL_TEXTURE_2D:
            glGetIntegerv(GL_TEXTURE_BINDING_2D, &boundTex);
            break;

        case GL_TEXTURE_CUBE_MAP:
            glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &boundTex);
            break;

        default:
            qFatal("Unsupported texture type");
    }
    (void)CHECK_GL_ERROR();

    glBindTexture(_target, _texture);
    f();
    glBindTexture(_target, boundTex);
    (void)CHECK_GL_ERROR();
}
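// withPreservedTexture() is a scoped-binding helper: it snapshots the binding for _target,
// binds this texture, runs the callback, then restores the previous binding so the rest of
// the backend's binding state stays truthful. Illustrative use (the lambda body is an
// example only):
//
//     withPreservedTexture([&] {
//         glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
//     });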

void GLBackend::GLTexture::generateMips() const {
    withPreservedTexture([&] {
        glGenerateMipmap(_target);
    });
    (void)CHECK_GL_ERROR();
}

void GLBackend::GLTexture::allocateStorage() const {
    gl::GLTexelFormat texelFormat = gl::GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat());
    glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, 0);
    (void)CHECK_GL_ERROR();
    glTexParameteri(_target, GL_TEXTURE_MAX_LEVEL, _maxMip - _minMip);
    (void)CHECK_GL_ERROR();
    if (GLEW_VERSION_4_2 && !_gpuObject.getTexelFormat().isCompressed()) {
        // Get the dimensions, accounting for the downgrade level
        Vec3u dimensions = _gpuObject.evalMipDimensions(_minMip);
        glTexStorage2D(_target, usedMipLevels(), texelFormat.internalFormat, dimensions.x, dimensions.y);
        (void)CHECK_GL_ERROR();
    } else {
        for (uint16_t l = _minMip; l <= _maxMip; l++) {
            // Get the mip level dimensions, accounting for the downgrade level
            Vec3u dimensions = _gpuObject.evalMipDimensions(l);
            for (GLenum target : getFaceTargets(_target)) {
                glTexImage2D(target, l - _minMip, texelFormat.internalFormat, dimensions.x, dimensions.y, 0, texelFormat.format, texelFormat.type, NULL);
                (void)CHECK_GL_ERROR();
            }
        }
    }
}
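// The GL 4.2 branch above relies on glTexStorage2D, which allocates an immutable,
// mip-complete texture in one validated call; the fallback issues a mutable glTexImage2D
// per mip and face with a null data pointer, i.e. allocation only. In both paths the
// actual texels arrive later via transferMip().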

void GLBackend::GLTexture::updateSize() const {
    setSize(_virtualSize);
    if (!_id) {
        return;
    }

    if (_gpuObject.getTexelFormat().isCompressed()) {
        GLenum proxyType = GL_TEXTURE_2D;
        GLuint numFaces = 1;
        if (_gpuObject.getType() == gpu::Texture::TEX_CUBE) {
            proxyType = CUBE_FACE_LAYOUT[0];
            numFaces = (GLuint)CUBE_NUM_FACES;
        }
        GLint gpuSize{ 0 };
        glGetTexLevelParameteriv(proxyType, 0, GL_TEXTURE_COMPRESSED, &gpuSize);
        (void)CHECK_GL_ERROR();

        if (gpuSize) {
            for (GLuint level = _minMip; level < _maxMip; level++) {
                GLint levelSize{ 0 };
                glGetTexLevelParameteriv(proxyType, level, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &levelSize);
                levelSize *= numFaces;

                if (levelSize <= 0) {
                    break;
                }
                gpuSize += levelSize;
            }
            (void)CHECK_GL_ERROR();
            setSize(gpuSize);
            return;
        }
    }
}

// Move content bits from the CPU to the GPU for a given mip / face
void GLBackend::GLTexture::transferMip(uint16_t mipLevel, uint8_t face) const {
    auto mip = _gpuObject.accessStoredMipFace(mipLevel, face);
    gl::GLTexelFormat texelFormat = gl::GLTexelFormat::evalGLTexelFormat(_gpuObject.getTexelFormat(), mip->getFormat());
    GLenum target = _target == GL_TEXTURE_2D ? GL_TEXTURE_2D : CUBE_FACE_LAYOUT[face];
    auto size = _gpuObject.evalMipDimensions(mipLevel);
    glTexSubImage2D(target, mipLevel, 0, 0, size.x, size.y, texelFormat.format, texelFormat.type, mip->readData());
    (void)CHECK_GL_ERROR();
}

// This should never happen on the main thread
// Move content bits from the CPU to the GPU
void GLBackend::GLTexture::transfer() const {
    PROFILE_RANGE(__FUNCTION__);
    // Need to update the content of the GPU object from the source sysmem of the texture
    if (_contentStamp >= _gpuObject.getDataStamp()) {
        return;
    }

    glBindTexture(_target, _id);
    (void)CHECK_GL_ERROR();

    if (_downsampleSource._texture) {
        GLuint fbo { 0 };
        glGenFramebuffers(1, &fbo);
        (void)CHECK_GL_ERROR();
        glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
        (void)CHECK_GL_ERROR();
        // Find the distance between the old min mip and the new one
        uint16 mipOffset = _minMip - _downsampleSource._minMip;
        for (uint16 i = _minMip; i <= _maxMip; ++i) {
            uint16 targetMip = i - _minMip;
            uint16 sourceMip = targetMip + mipOffset;
            Vec3u dimensions = _gpuObject.evalMipDimensions(i);
            for (GLenum target : getFaceTargets(_target)) {
                glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, target, _downsampleSource._texture, sourceMip);
                (void)CHECK_GL_ERROR();
                glCopyTexSubImage2D(target, targetMip, 0, 0, 0, 0, dimensions.x, dimensions.y);
                (void)CHECK_GL_ERROR();
            }
        }
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        glDeleteFramebuffers(1, &fbo);
    } else {
        // Go through the process of allocating the correct storage and/or updating the content
        switch (_gpuObject.getType()) {
            case Texture::TEX_2D:
            {
                for (uint16_t i = _minMip; i <= _maxMip; ++i) {
                    if (_gpuObject.isStoredMipFaceAvailable(i)) {
                        transferMip(i);
                    }
                }
            }
            break;

            case Texture::TEX_CUBE:
                // Transfer pixels from each face
                for (uint8_t f = 0; f < CUBE_NUM_FACES; f++) {
                    for (uint16_t i = 0; i < Sampler::MAX_MIP_LEVEL; ++i) {
                        if (_gpuObject.isStoredMipFaceAvailable(i, f)) {
                            transferMip(i, f);
                        }
                    }
                }
                break;

            default:
                qCWarning(gpugl41logging) << __FUNCTION__ << " case for Texture Type " << _gpuObject.getType() << " not supported";
                break;
        }
    }
    if (_gpuObject.isAutogenerateMips()) {
        glGenerateMipmap(_target);
        (void)CHECK_GL_ERROR();
    }
}
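// Note on the _downsampleSource branch in transfer(): instead of re-uploading from system
// memory, it attaches each source mip to a temporary read framebuffer and pulls the pixels
// GPU-to-GPU with glCopyTexSubImage2D, so a texture demoted to fewer mips keeps its content
// without a CPU round trip.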

void GLBackend::GLTexture::syncSampler() const {
    const Sampler& sampler = _gpuObject.getSampler();
    Texture::Type type = _gpuObject.getType();
    auto object = this;

    const auto& fm = FILTER_MODES[sampler.getFilter()];
    glTexParameteri(_target, GL_TEXTURE_MIN_FILTER, fm.minFilter);
    glTexParameteri(_target, GL_TEXTURE_MAG_FILTER, fm.magFilter);

    if (sampler.doComparison()) {
        glTexParameteri(_target, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
        glTexParameteri(_target, GL_TEXTURE_COMPARE_FUNC, gl::COMPARISON_TO_GL[sampler.getComparisonFunction()]);
    } else {
        glTexParameteri(_target, GL_TEXTURE_COMPARE_MODE, GL_NONE);
    }

    glTexParameteri(_target, GL_TEXTURE_WRAP_S, WRAP_MODES[sampler.getWrapModeU()]);
    glTexParameteri(_target, GL_TEXTURE_WRAP_T, WRAP_MODES[sampler.getWrapModeV()]);
    glTexParameteri(_target, GL_TEXTURE_WRAP_R, WRAP_MODES[sampler.getWrapModeW()]);

    glTexParameterfv(_target, GL_TEXTURE_BORDER_COLOR, (const float*)&sampler.getBorderColor());
    glTexParameteri(_target, GL_TEXTURE_BASE_LEVEL, (uint16)sampler.getMipOffset());
    glTexParameterf(_target, GL_TEXTURE_MIN_LOD, (float)sampler.getMinMip());
    glTexParameterf(_target, GL_TEXTURE_MAX_LOD, (sampler.getMaxMip() == Sampler::MAX_MIP_LEVEL ? 1000.f : sampler.getMaxMip()));
    glTexParameterf(_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, sampler.getMaxAnisotropy());
}
83  libraries/gpu-gl/src/gpu/gl41/GLBackendTransform.cpp  Normal file

@ -0,0 +1,83 @@
//
//  GLBackendTransform.cpp
//  libraries/gpu/src/gpu
//
//  Created by Sam Gateau on 3/8/2015.
//  Copyright 2014 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackend.h"

using namespace gpu;
using namespace gpu::gl41;

void GLBackend::initTransform() {
    glGenBuffers(1, &_transform._objectBuffer);
    glGenBuffers(1, &_transform._cameraBuffer);
    glGenBuffers(1, &_transform._drawCallInfoBuffer);
    glGenTextures(1, &_transform._objectBufferTexture);
    size_t cameraSize = sizeof(TransformStageState::CameraBufferElement);
    while (_transform._cameraUboSize < cameraSize) {
        _transform._cameraUboSize += _uboAlignment;
    }
}
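// The while loop in initTransform() rounds the per-camera stride up to the next multiple of
// the UBO offset alignment (_uboAlignment is presumably queried from
// GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT), since glBindBufferRange() offsets must be aligned.
// A closed-form equivalent would be:
//
//     _transform._cameraUboSize = ((cameraSize + _uboAlignment - 1) / _uboAlignment) * _uboAlignment;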

void GLBackend::transferTransformState(const Batch& batch) const {
    // FIXME not thread safe
    static std::vector<uint8_t> bufferData;
    if (!_transform._cameras.empty()) {
        bufferData.resize(_transform._cameraUboSize * _transform._cameras.size());
        for (size_t i = 0; i < _transform._cameras.size(); ++i) {
            memcpy(bufferData.data() + (_transform._cameraUboSize * i), &_transform._cameras[i], sizeof(TransformStageState::CameraBufferElement));
        }
        glBindBuffer(GL_UNIFORM_BUFFER, _transform._cameraBuffer);
        glBufferData(GL_UNIFORM_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_UNIFORM_BUFFER, 0);
    }

    if (!batch._objects.empty()) {
        auto byteSize = batch._objects.size() * sizeof(Batch::TransformObject);
        bufferData.resize(byteSize);
        memcpy(bufferData.data(), batch._objects.data(), byteSize);

#ifdef GPU_SSBO_DRAW_CALL_INFO
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, _transform._objectBuffer);
        glBufferData(GL_SHADER_STORAGE_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
#else
        glBindBuffer(GL_TEXTURE_BUFFER, _transform._objectBuffer);
        glBufferData(GL_TEXTURE_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_TEXTURE_BUFFER, 0);
#endif
    }

    if (!batch._namedData.empty()) {
        bufferData.clear();
        for (auto& data : batch._namedData) {
            auto currentSize = bufferData.size();
            auto bytesToCopy = data.second.drawCallInfos.size() * sizeof(Batch::DrawCallInfo);
            bufferData.resize(currentSize + bytesToCopy);
            memcpy(bufferData.data() + currentSize, data.second.drawCallInfos.data(), bytesToCopy);
            _transform._drawCallInfoOffsets[data.first] = (GLvoid*)currentSize;
        }

        glBindBuffer(GL_ARRAY_BUFFER, _transform._drawCallInfoBuffer);
        glBufferData(GL_ARRAY_BUFFER, bufferData.size(), bufferData.data(), GL_DYNAMIC_DRAW);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
    }

#ifdef GPU_SSBO_DRAW_CALL_INFO
    glBindBufferBase(GL_SHADER_STORAGE_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._objectBuffer);
#else
    glActiveTexture(GL_TEXTURE0 + TRANSFORM_OBJECT_SLOT);
    glBindTexture(GL_TEXTURE_BUFFER, _transform._objectBufferTexture);
    glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA32F, _transform._objectBuffer);
#endif

    CHECK_GL_ERROR();

    // Invalidate the current camera offset so the next draw re-binds it
    _transform._currentCameraOffset = INVALID_OFFSET;
}
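// The camera UBO above is written at a fixed _cameraUboSize stride so that, at draw time,
// camera N can be exposed with a single aligned range binding such as
// glBindBufferRange(GL_UNIFORM_BUFFER, slot, _cameraBuffer, N * _cameraUboSize,
// sizeof(TransformStageState::CameraBufferElement)) rather than a fresh upload per draw.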

@ -2,3 +2,5 @@ set(TARGET_NAME gpu)
AUTOSCRIBE_SHADER_LIB(gpu)
setup_hifi_library()
link_hifi_libraries(shared)

target_nsight()
@ -10,9 +10,10 @@
//
#include "Batch.h"

#include <string.h>

#include <QDebug>

#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
@ -481,3 +482,106 @@ void Batch::popProfileRange() {
    ADD_COMMAND(popProfileRange);
#endif
}

#define GL_TEXTURE0 0x84C0

void Batch::_glActiveBindTexture(uint32 unit, uint32 target, uint32 texture) {
    // Clear the cache entry for the texture unit we are about to use, so the next
    // setResourceTexture() call on the same slot works correctly
    setResourceTexture(unit - GL_TEXTURE0, nullptr);

    ADD_COMMAND(glActiveBindTexture);
    _params.push_back(texture);
    _params.push_back(target);
    _params.push_back(unit);
}
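// (The local GL_TEXTURE0 define above lets Batch.cpp translate texture-unit enums without
// including a GL header; 0x84C0 is the standard value of GL_TEXTURE0.)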

void Batch::_glUniform1i(int32 location, int32 v0) {
    if (location < 0) {
        return;
    }
    ADD_COMMAND(glUniform1i);
    _params.push_back(v0);
    _params.push_back(location);
}

void Batch::_glUniform1f(int32 location, float v0) {
    if (location < 0) {
        return;
    }
    ADD_COMMAND(glUniform1f);
    _params.push_back(v0);
    _params.push_back(location);
}

void Batch::_glUniform2f(int32 location, float v0, float v1) {
    ADD_COMMAND(glUniform2f);

    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
}

void Batch::_glUniform3f(int32 location, float v0, float v1, float v2) {
    ADD_COMMAND(glUniform3f);

    _params.push_back(v2);
    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
}

void Batch::_glUniform4f(int32 location, float v0, float v1, float v2, float v3) {
    ADD_COMMAND(glUniform4f);

    _params.push_back(v3);
    _params.push_back(v2);
    _params.push_back(v1);
    _params.push_back(v0);
    _params.push_back(location);
}
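// Recording convention for the scalar uniforms above: arguments are pushed in reverse, so
// the replay side can read them with fixed offsets from its parameter base; for
// _glUniform4f, _params[paramOffset] holds v3 and _params[paramOffset + 4] holds the location.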

void Batch::_glUniform3fv(int32 location, int count, const float* value) {
    ADD_COMMAND(glUniform3fv);

    const int VEC3_SIZE = 3 * sizeof(float);
    _params.push_back(cacheData(count * VEC3_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}

void Batch::_glUniform4fv(int32 location, int count, const float* value) {
    ADD_COMMAND(glUniform4fv);

    const int VEC4_SIZE = 4 * sizeof(float);
    _params.push_back(cacheData(count * VEC4_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}

void Batch::_glUniform4iv(int32 location, int count, const int32* value) {
    ADD_COMMAND(glUniform4iv);

    const int VEC4_SIZE = 4 * sizeof(int);
    _params.push_back(cacheData(count * VEC4_SIZE, value));
    _params.push_back(count);
    _params.push_back(location);
}

void Batch::_glUniformMatrix4fv(int32 location, int count, uint8 transpose, const float* value) {
    ADD_COMMAND(glUniformMatrix4fv);

    const int MATRIX4_SIZE = 16 * sizeof(float);
    _params.push_back(cacheData(count * MATRIX4_SIZE, value));
    _params.push_back(transpose);
    _params.push_back(count);
    _params.push_back(location);
}

void Batch::_glColor4f(float red, float green, float blue, float alpha) {
    ADD_COMMAND(glColor4f);

    _params.push_back(alpha);
    _params.push_back(blue);
    _params.push_back(green);
    _params.push_back(red);
}
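// For the pointer-taking variants, cacheData() copies the client array into the batch's own
// storage and a handle to that copy is what gets pushed, so the caller's buffer may be freed
// or reused immediately; replay happens later, possibly on another thread.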

@ -84,6 +84,13 @@ namespace gpu {

namespace gl {
    class GLBuffer;
}

namespace gl41 {
    class GLBackend;
}

namespace gl45 {
    class GLBackend;
}
}

@ -184,8 +184,34 @@ public:
        return append(sizeof(T) * t.size(), reinterpret_cast<const Byte*>(&t[0]));
    }

    bool getNextTransferBlock(Size& outOffset, Size& outSize, Size& currentPage) const {
        Size pageCount = _pages.size();
        // Advance to the first dirty page
        while (currentPage < pageCount && (0 == (Buffer::DIRTY & _pages[currentPage]))) {
            ++currentPage;
        }

        // If we reached the end, there is nothing left to transfer
        if (currentPage >= pageCount) {
            return false;
        }

        // Consume the run of dirty pages, clearing their dirty flags, until the next clean page
        outOffset = static_cast<Size>(currentPage * _pageSize);
        while (currentPage < pageCount && (0 != (Buffer::DIRTY & _pages[currentPage]))) {
            _pages[currentPage] &= ~Buffer::DIRTY;
            ++currentPage;
        }
        outSize = static_cast<Size>((currentPage * _pageSize) - outOffset);
        return true;
    }
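    // A hedged sketch of how a backend might drive this to upload only the dirty byte ranges
    // (one glBufferSubData per coalesced block; the buffer target and names are illustrative):
    //
    //     Size offset = 0, size = 0, page = 0;
    //     while (buffer.getNextTransferBlock(offset, size, page)) {
    //         glBufferSubData(GL_ARRAY_BUFFER, offset, size, buffer.getSysmem().readData() + offset);
    //     }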

    const GPUObjectPointer gpuObject {};

    // Access the sysmem object, limited to ourselves and GPUObject derived classes
    const Sysmem& getSysmem() const { return _sysmem; }
    // FIXME find a better access mechanism for clearing this
    mutable uint8_t _flags;
protected:
    void markDirty(Size offset, Size bytes);
@ -194,21 +220,18 @@ protected:
        markDirty(sizeof(T) * index, sizeof(T) * count);
    }

    Sysmem& editSysmem() { return _sysmem; }
    Byte* editData() { return editSysmem().editData(); }

    Size getRequiredPageCount() const;

    Size _end { 0 };
    mutable PageFlags _pages;
    const Size _pageSize;
    Sysmem _sysmem;

    // FIXME find a more generic way to do this.
    friend class gl::GLBackend;
    friend class gl::GLBuffer;
    friend class BufferView;
};