Mirror of https://github.com/overte-org/overte.git (synced 2025-08-08 14:58:03 +02:00)
Fix stencil buffer background issue

commit c685cc0e6f (parent 51a0131414)
14 changed files with 219 additions and 269 deletions

@@ -174,7 +174,7 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
     _inRenderTransferPass = true;
     { // Sync all the buffers
-        ANDROID_PROFILE(render, "syncGPUBuffer", 0xffaaffaa, 1)
+        PROFILE_RANGE(render_gpu_gl_detail, "syncGPUBuffer");
         for (auto& cached : batch._buffers._items) {
             if (cached._data) {

@@ -184,7 +184,7 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
     }
     { // Sync all the transform states
-        ANDROID_PROFILE(render, "syncCPUTransform", 0xffaaaaff, 1)
+        PROFILE_RANGE(render_gpu_gl_detail, "syncCPUTransform");
         _transform._cameras.clear();
         _transform._cameraOffsets.clear();

@@ -210,7 +210,6 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
             case Batch::COMMAND_setViewportTransform:
             case Batch::COMMAND_setViewTransform:
             case Batch::COMMAND_setProjectionTransform: {
-                ANDROID_PROFILE_COMMAND(render, (int)(*command), 0xffeeaaff, 1)
                 CommandCall call = _commandCalls[(*command)];
                 (this->*(call))(batch, *offset);
                 break;

@@ -225,8 +224,7 @@ void GLBackend::renderPassTransfer(const Batch& batch) {
     }
     { // Sync the transform buffers
-        //PROFILE_RANGE(render_gpu_gl, "transferTransformState");
-        ANDROID_PROFILE(render, "transferTransformState", 0xff0000ff, 1)
+        PROFILE_RANGE(render_gpu_gl_detail, "syncGPUTransform");
         transferTransformState(batch);
     }

@@ -261,15 +259,12 @@ void GLBackend::renderPassDraw(const Batch& batch) {
                 updateInput();
                 updateTransform(batch);
                 updatePipeline();
-                {
-                    ANDROID_PROFILE_COMMAND(render, (int)(*command), 0xff0000ff, 1)
                 CommandCall call = _commandCalls[(*command)];
                 (this->*(call))(batch, *offset);
-                }
                 break;
             }
             default: {
-                ANDROID_PROFILE_COMMAND(render, (int)(*command), 0xffff00ff, 1)
                 CommandCall call = _commandCalls[(*command)];
                 (this->*(call))(batch, *offset);
                 break;

@@ -282,7 +277,6 @@ void GLBackend::renderPassDraw(const Batch& batch) {
 }

 void GLBackend::render(const Batch& batch) {
-    ANDROID_PROFILE(render, "GLBackendRender", 0xffff00ff, 1)
     _transform._skybox = _stereo._skybox = batch.isSkyboxEnabled();
     // Allow the batch to override the rendering stereo settings
     // for things like full framebuffer copy operations (deferred lighting passes)

@@ -292,21 +286,24 @@ void GLBackend::render(const Batch& batch) {
     }
     {
-        //PROFILE_RANGE(render_gpu_gl, "Transfer");
-        ANDROID_PROFILE(render, "Transfer", 0xff0000ff, 1)
+        PROFILE_RANGE(render_gpu_gl_detail, "Transfer");
         renderPassTransfer(batch);
     }

 #ifdef GPU_STEREO_DRAWCALL_INSTANCED
     if (_stereo.isStereo()) {
-        glEnable(GL_CLIP_DISTANCE0);
+        glEnable(GL_CLIP_DISTANCE0_EXT);
     }
 #endif
     {
-        //PROFILE_RANGE(render_gpu_gl, _stereo._enable ? "Render Stereo" : "Render");
-        ANDROID_PROFILE(render, "RenderPassDraw", 0xff00ddff, 1)
+        PROFILE_RANGE(render_gpu_gl_detail, _stereo.isStereo() ? "Render Stereo" : "Render");
         renderPassDraw(batch);
     }
+#ifdef GPU_STEREO_DRAWCALL_INSTANCED
+    if (_stereo.isStereo()) {
+        glDisable(GL_CLIP_DISTANCE0_EXT);
+    }
+#endif

     // Restore the saved stereo state for the next batch
     _stereo._enable = savedStereo;
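
With GPU_STEREO_DRAWCALL_INSTANCED the shape is drawn once with doubled instancing and, as the header comment later in this commit puts it, relies on a clipping plane to draw the left/right side only; the enable/disable pair added above keeps that clip plane from staying enabled after a stereo batch. A minimal sketch of the same bracketing, assuming a GLES 3.x context where EXT_clip_cull_distance is available (the ScopedClipDistance helper and renderStereoBatch are illustrative names, not engine API):

    #include <GLES3/gl3.h>

    #ifndef GL_CLIP_DISTANCE0_EXT
    #define GL_CLIP_DISTANCE0_EXT 0x3000   // from EXT_clip_cull_distance
    #endif

    // RAII wrapper: the clip plane cannot be left enabled by an early return.
    class ScopedClipDistance {
    public:
        explicit ScopedClipDistance(bool enabled) : _enabled(enabled) {
            if (_enabled) { glEnable(GL_CLIP_DISTANCE0_EXT); }
        }
        ~ScopedClipDistance() {
            if (_enabled) { glDisable(GL_CLIP_DISTANCE0_EXT); }
        }
    private:
        bool _enabled;
    };

    void renderStereoBatch(bool stereo) {
        ScopedClipDistance clip(stereo);   // enabled only for stereo instanced draws
        // renderPassDraw(batch);          // each instance is clipped to its own eye
    }

The diff achieves the same thing with explicit glEnable/glDisable calls; the wrapper only makes the pairing harder to miss.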
@@ -314,15 +311,15 @@ void GLBackend::render(const Batch& batch) {

 void GLBackend::syncCache() {
+    PROFILE_RANGE(render_gpu_gl_detail, __FUNCTION__);

     syncTransformStateCache();
     syncPipelineStateCache();
     syncInputStateCache();
     syncOutputStateCache();

-    //glEnable(GL_LINE_SMOOTH);
-    qDebug() << "TODO: GLBackend.cpp:syncCache GL_LINE_SMOOTH";
 }

+#ifdef GPU_STEREO_DRAWCALL_DOUBLED
 void GLBackend::setupStereoSide(int side) {
     ivec4 vp = _transform._viewport;
     vp.z /= 2;

@@ -330,14 +327,14 @@ void GLBackend::setupStereoSide(int side) {

 #ifdef GPU_STEREO_CAMERA_BUFFER
 #ifdef GPU_STEREO_DRAWCALL_DOUBLED
-    //glVertexAttribI1i(14, side);
-    glVertexAttribI4i(14, side, 0, 0, 0);
+    glVertexAttribI1i(14, side);
 #endif
 #else
     _transform.bindCurrentCamera(side);
 #endif
 }
+#else
+#endif

 void GLBackend::do_resetStages(const Batch& batch, size_t paramOffset) {
     resetStages();

@@ -387,27 +384,34 @@ void GLBackend::resetStages() {

 void GLBackend::do_pushProfileRange(const Batch& batch, size_t paramOffset) {
+    if (trace_render_gpu_gl_detail().isDebugEnabled()) {
         auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
         profileRanges.push_back(name);
 #if defined(NSIGHT_FOUND)
         nvtxRangePush(name.c_str());
 #endif
     }
+}

 void GLBackend::do_popProfileRange(const Batch& batch, size_t paramOffset) {
+    if (trace_render_gpu_gl_detail().isDebugEnabled()) {
         profileRanges.pop_back();
 #if defined(NSIGHT_FOUND)
         nvtxRangePop();
 #endif
     }
+}

 // TODO: As long as we have gl calls explicitely issued from interface
 // code, we need to be able to record and batch these calls. THe long
 // term strategy is to get rid of any GL calls in favor of the HIFI GPU API

 // As long as we don;t use several versions of shaders we can avoid this more complex code path
-// #define GET_UNIFORM_LOCATION(shaderUniformLoc) _pipeline._programShader->getUniformLocation(shaderUniformLoc, isStereo());
+#ifdef GPU_STEREO_CAMERA_BUFFER
+#define GET_UNIFORM_LOCATION(shaderUniformLoc) ((_pipeline._programShader) ? _pipeline._programShader->getUniformLocation(shaderUniformLoc, (GLShader::Version) isStereo()) : -1)
+#else
 #define GET_UNIFORM_LOCATION(shaderUniformLoc) shaderUniformLoc
+#endif

 void GLBackend::do_glUniform1i(const Batch& batch, size_t paramOffset) {
     if (_pipeline._program == 0) {

@@ -571,6 +575,10 @@ void GLBackend::do_glColor4f(const Batch& batch, size_t paramOffset) {
     if (_input._colorAttribute != newColor) {
         _input._colorAttribute = newColor;
         glVertexAttrib4fv(gpu::Stream::COLOR, &_input._colorAttribute.r);
+        // Color has been changed and is not white. To prevent colors from bleeding
+        // between different objects, we need to set the _hadColorAttribute flag
+        // as if a previous render call had potential colors
+        _input._hadColorAttribute = (newColor != glm::vec4(1.0f, 1.0f, 1.0f, 1.0f));
     }
     (void)CHECK_GL_ERROR();
 }
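
The _hadColorAttribute flag added above exists because glVertexAttrib4fv sets a constant (non-array) vertex attribute, and that value is context state: it persists across draw calls until something changes it. A short standalone illustration of that persistence, assuming a current GLES context (attribute slot 3 stands in for gpu::Stream::COLOR):

    #include <GLES3/gl3.h>

    void demonstrateConstantAttributePersistence() {
        const GLuint kColorSlot = 3;                        // stand-in for gpu::Stream::COLOR
        const GLfloat red[4] = { 1.0f, 0.0f, 0.0f, 1.0f };

        glDisableVertexAttribArray(kColorSlot);             // no vertex buffer bound for this slot
        glVertexAttrib4fv(kColorSlot, red);
        // draw object A ... it is tinted red

        // Nothing resets the attribute here, so object B would also read red unless the
        // backend remembers that a color was set (the _hadColorAttribute flag) and restores
        // white before the next draw.
        // draw object B ...
    }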
@@ -616,7 +624,7 @@ void GLBackend::queueLambda(const std::function<void()> lambda) const {
 }

 void GLBackend::recycle() const {
-    CHECK_GL_ERROR();
+    PROFILE_RANGE(render_gpu_gl, __FUNCTION__)
     {
         std::list<std::function<void()>> lamdbasTrash;
         {

@@ -625,7 +633,6 @@ void GLBackend::recycle() const {
         }
         for (auto lambda : lamdbasTrash) {
             lambda();
-            CHECK_GL_ERROR();
         }
     }

@@ -642,7 +649,6 @@ void GLBackend::recycle() const {
         }
         if (!ids.empty()) {
             glDeleteBuffers((GLsizei)ids.size(), ids.data());
-            CHECK_GL_ERROR();
         }
     }

@@ -659,7 +665,6 @@ void GLBackend::recycle() const {
         }
         if (!ids.empty()) {
             glDeleteFramebuffers((GLsizei)ids.size(), ids.data());
-            CHECK_GL_ERROR();
         }
     }

@@ -676,7 +681,6 @@ void GLBackend::recycle() const {
         }
         if (!ids.empty()) {
             glDeleteTextures((GLsizei)ids.size(), ids.data());
-            CHECK_GL_ERROR();
         }
     }

@@ -691,7 +695,6 @@ void GLBackend::recycle() const {
         fences.resize(externalTexturesTrash.size());
         for (size_t i = 0; i < externalTexturesTrash.size(); ++i) {
             fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
-            CHECK_GL_ERROR();
         }
         // External texture fences will be read in another thread/context, so we need a flush
         glFlush();
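
The comment about flushing after the fence loop is the important part of that hunk: the fences are created on the backend's context but waited on from another thread's context, and without glFlush the fence commands might not be submitted to the GPU queue, leaving the consumer waiting. A minimal sketch of the producer/consumer pattern, assuming two threads that each have their own current GL context sharing the texture (function names are illustrative):

    #include <GLES3/gl3.h>

    // Producer (backend) side: fence after the last write to the shared texture,
    // then flush so the fence actually reaches the GPU.
    GLsync publishSharedTexture() {
        GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
        glFlush();   // required: the fence is consumed in another context
        return fence;
    }

    // Consumer side (different thread/context): wait before sampling the texture.
    void consumeSharedTexture(GLsync fence) {
        glWaitSync(fence, 0, GL_TIMEOUT_IGNORED);   // server-side wait, no CPU stall
        glDeleteSync(fence);
        // ... bind and sample the shared texture ...
    }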
@@ -722,7 +725,6 @@ void GLBackend::recycle() const {
         }
         for (auto id : shadersTrash) {
             glDeleteShader(id);
-            CHECK_GL_ERROR();
         }
     }

@@ -739,13 +741,12 @@ void GLBackend::recycle() const {
         }
         if (!ids.empty()) {
             glDeleteQueries((GLsizei)ids.size(), ids.data());
-            CHECK_GL_ERROR();
         }
     }

     GLVariableAllocationSupport::manageMemory();
     GLVariableAllocationSupport::_frameTexturesCreated = 0;
+    Texture::KtxStorage::releaseOpenKtxFiles();
 }

 void GLBackend::setCameraCorrection(const Mat4& correction) {

@@ -29,9 +29,16 @@
 // Different versions for the stereo drawcall
 // Current preferred is "instanced" which draw the shape twice but instanced and rely on clipping plane to draw left/right side only
+#define GPU_STEREO_TECHNIQUE_DOUBLED_SIMPLE
 //#define GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
 //#define GPU_STEREO_TECHNIQUE_INSTANCED

+// Let these be configured by the one define picked above
+#ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SIMPLE
+#define GPU_STEREO_DRAWCALL_DOUBLED
+#endif
+
 #ifdef GPU_STEREO_TECHNIQUE_DOUBLED_SMARTER
 #define GPU_STEREO_DRAWCALL_DOUBLED
 #define GPU_STEREO_CAMERA_BUFFER

@@ -42,15 +49,6 @@
 #define GPU_STEREO_CAMERA_BUFFER
 #endif

-//#define ANDROID_INTENSIVE_INSTRUMENTATION 1
-
-#ifdef ANDROID_INTENSIVE_INSTRUMENTATION
-#define ANDROID_PROFILE_COMMAND(category, commandIndex, argbColor, payload, ...) PROFILE_RANGE_EX(category, commandNames[commandIndex], argbColor, payload, ##__VA_ARGS__);
-#define ANDROID_PROFILE(category, name, argbColor, payload, ...) PROFILE_RANGE_EX(category, name, argbColor, payload, ##__VA_ARGS__);
-#else
-#define ANDROID_PROFILE_COMMAND(category, commandIndex, argbColor, payload, ...)
-#define ANDROID_PROFILE(category, name, argbColor, payload, ...)
-#endif
 namespace gpu { namespace gl {

 class GLBackend : public Backend, public std::enable_shared_from_this<GLBackend> {

@@ -239,7 +237,10 @@ protected:
     void renderPassTransfer(const Batch& batch);
     void renderPassDraw(const Batch& batch);

+#ifdef GPU_STEREO_DRAWCALL_DOUBLED
     void setupStereoSide(int side);
+#endif
+
     virtual void initInput() final;
     virtual void killInput() final;

@@ -249,6 +250,7 @@ protected:
     struct InputStageState {
         bool _invalidFormat { true };
+        bool _hadColorAttribute{ true };
         Stream::FormatPointer _format;
         std::string _formatKey;

@@ -276,7 +278,6 @@ protected:
         Offset _indirectBufferStride{ 0 };

         GLuint _defaultVAO { 0 };
-        bool _hadColorAttribute{ false };

         InputStageState() :
             _invalidFormat(true),

@@ -73,13 +73,11 @@ void GLBackend::initInput() {
     if(!_input._defaultVAO) {
         glGenVertexArrays(1, &_input._defaultVAO);
     }
-    qDebug() << "glBindVertexArray(" << _input._defaultVAO << ")";
     glBindVertexArray(_input._defaultVAO);
     (void) CHECK_GL_ERROR();
 }

 void GLBackend::killInput() {
-    qDebug() << "glBindVertexArray(0)";
     glBindVertexArray(0);
     if(_input._defaultVAO) {
         glDeleteVertexArrays(1, &_input._defaultVAO);

@@ -94,7 +92,6 @@ void GLBackend::syncInputStateCache() {
         _input._attributeActivation[i] = active;
     }
     //_input._defaultVAO
-    qDebug() << "glBindVertexArray("<<_input._defaultVAO<< ")";
     glBindVertexArray(_input._defaultVAO);
 }

@@ -103,7 +100,6 @@ void GLBackend::resetInputStage() {
     _input._indexBufferType = UINT32;
     _input._indexBufferOffset = 0;
     _input._indexBuffer.reset();
-    //qDebug() << "GLBackend::resetInputStage glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);";
     glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
     (void) CHECK_GL_ERROR();

@@ -48,8 +48,8 @@ void GLBackend::do_setFramebuffer(const Batch& batch, size_t paramOffset) {
 }

 void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
-    if (_stereo._enable && !_pipeline._stateCache.scissorEnable) {
-        //qWarning("Clear without scissor in stereo mode");
+    if (_stereo.isStereo() && !_pipeline._stateCache.scissorEnable) {
+        qWarning("Clear without scissor in stereo mode");
     }

     uint32 masks = batch._params[paramOffset + 7]._uint;

@@ -63,17 +63,22 @@ void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
     int useScissor = batch._params[paramOffset + 0]._int;

     GLuint glmask = 0;
+    bool restoreStencilMask = false;
+    uint8_t cacheStencilMask = 0xFF;
     if (masks & Framebuffer::BUFFER_STENCIL) {
         glClearStencil(stencil);
         glmask |= GL_STENCIL_BUFFER_BIT;
-        // TODO: we will probably need to also check the write mask of stencil like we do
-        // for depth buffer, but as would say a famous Fez owner "We'll cross that bridge when we come to it"
+        cacheStencilMask = _pipeline._stateCache.stencilActivation.getWriteMaskFront();
+        if (cacheStencilMask != 0xFF) {
+            restoreStencilMask = true;
+            glStencilMask( 0xFF);
+        }
     }

     bool restoreDepthMask = false;
     if (masks & Framebuffer::BUFFER_DEPTH) {
         glClearDepthf(depth);
         glmask |= GL_DEPTH_BUFFER_BIT;
         bool cacheDepthMask = _pipeline._stateCache.depthTest.getWriteMask();

@@ -122,6 +127,11 @@ void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
         glDisable(GL_SCISSOR_TEST);
     }

+    // Restore Stencil write mask
+    if (restoreStencilMask) {
+        glStencilMask(cacheStencilMask);
+    }
+
     // Restore write mask meaning turn back off
     if (restoreDepthMask) {
         glDepthMask(GL_FALSE);
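
These two do_clearFramebuffer hunks are the stencil fix named in the commit title. glClear honours the current stencil write mask, so if the previously bound pipeline left a partial glStencilMask in place, the background clear silently skipped some stencil bits. The clear now caches the pipeline's write mask, forces it to 0xFF for the clear, and restores it afterwards, mirroring what was already done for the depth write mask. A condensed sketch of the pattern, assuming a current GLES context (in the real backend the cached mask comes from the pipeline state cache):

    #include <GLES3/gl3.h>
    #include <cstdint>

    void clearStencilRespectingStateCache(GLint clearValue, uint8_t cachedWriteMask) {
        glClearStencil(clearValue);

        // glClear writes only the bits allowed by glStencilMask, so open the mask up...
        const bool restoreMask = (cachedWriteMask != 0xFF);
        if (restoreMask) {
            glStencilMask(0xFF);
        }

        glClear(GL_STENCIL_BUFFER_BIT);

        // ...then put the application's mask back so later draws see the state they expect.
        if (restoreMask) {
            glStencilMask(cachedWriteMask);
        }
    }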
@@ -138,26 +148,24 @@ void GLBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
     (void) CHECK_GL_ERROR();
 }

+#pragma optimize( "", on )
 void GLBackend::downloadFramebuffer(const FramebufferPointer& srcFramebuffer, const Vec4i& region, QImage& destImage) {
     auto readFBO = getFramebufferID(srcFramebuffer);
     if (srcFramebuffer && readFBO) {
         if ((srcFramebuffer->getWidth() < (region.x + region.z)) || (srcFramebuffer->getHeight() < (region.y + region.w))) {
-            qCDebug(gpugllogging) << "GLBackend::downloadFramebuffer : srcFramebuffer is too small to provide the region queried";
+            qCWarning(gpugllogging) << "GLBackend::downloadFramebuffer : srcFramebuffer is too small to provide the region queried";
             return;
         }
     }

     if ((destImage.width() < region.z) || (destImage.height() < region.w)) {
-        qCDebug(gpugllogging) << "GLBackend::downloadFramebuffer : destImage is too small to receive the region of the framebuffer";
+        qCWarning(gpugllogging) << "GLBackend::downloadFramebuffer : destImage is too small to receive the region of the framebuffer";
         return;
     }

     GLenum format = GL_RGBA;
-    //GLenum format = GL_BGRA;
-    qDebug() << "TODO: GLBackendOutput.cpp:do_clearFramebuffer GL_BGRA";

     if (destImage.format() != QImage::Format_ARGB32) {
-        qCDebug(gpugllogging) << "GLBackend::downloadFramebuffer : destImage format must be FORMAT_ARGB32 to receive the region of the framebuffer";
+        qCWarning(gpugllogging) << "GLBackend::downloadFramebuffer : destImage format must be FORMAT_ARGB32 to receive the region of the framebuffer";
         return;
     }

@@ -31,7 +31,6 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {

     // null pipeline == reset
     if (!pipeline) {
-        qDebug() << " null pipeline";
         _pipeline._pipeline.reset();

         _pipeline._program = 0;

@@ -78,7 +77,12 @@ void GLBackend::do_setPipeline(const Batch& batch, size_t paramOffset) {
     if (_pipeline._invalidProgram) {
         glUseProgram(_pipeline._program);
         if (_pipeline._cameraCorrectionLocation != -1) {
-            auto cameraCorrectionBuffer = syncGPUObject(*_pipeline._cameraCorrectionBuffer._buffer);
+            gl::GLBuffer* cameraCorrectionBuffer = nullptr;
+            if (_transform._viewCorrectionEnabled) {
+                cameraCorrectionBuffer = syncGPUObject(*_pipeline._cameraCorrectionBuffer._buffer);
+            } else {
+                cameraCorrectionBuffer = syncGPUObject(*_pipeline._cameraCorrectionBufferIdentity._buffer);
+            }
             glBindBufferRange(GL_UNIFORM_BUFFER, _pipeline._cameraCorrectionLocation, cameraCorrectionBuffer->_id, 0, sizeof(CameraCorrection));
         }
         (void) CHECK_GL_ERROR();
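
The do_setPipeline change above makes the camera-correction UBO binding respect _transform._viewCorrectionEnabled: when view correction is off, an identity correction buffer is bound instead, presumably so shaders that declare cameraCorrectionBuffer still read well-defined data rather than a stale reprojection. A hedged sketch of the selection logic in isolation (the UBO struct layout and handles below are placeholders, not the engine's exact definitions):

    #include <GLES3/gl3.h>
    #include <glm/glm.hpp>

    struct CameraCorrectionUBO {          // placeholder layout
        glm::mat4 correction { 1.0f };    // identity by default
        glm::mat4 correctionInverse { 1.0f };
    };

    void bindCameraCorrection(GLuint bindingPoint, bool viewCorrectionEnabled,
                              GLuint correctionUbo, GLuint identityUbo) {
        // Pick the live correction buffer or a pre-built identity buffer; never leave
        // the binding point pointing at stale data.
        const GLuint chosen = viewCorrectionEnabled ? correctionUbo : identityUbo;
        glBindBufferRange(GL_UNIFORM_BUFFER, bindingPoint, chosen, 0, sizeof(CameraCorrectionUBO));
    }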
@@ -150,6 +154,10 @@ void GLBackend::resetUniformStage() {

 void GLBackend::do_setUniformBuffer(const Batch& batch, size_t paramOffset) {
     GLuint slot = batch._params[paramOffset + 3]._uint;
+    if (slot >(GLuint)MAX_NUM_UNIFORM_BUFFERS) {
+        qCDebug(gpugllogging) << "GLBackend::do_setUniformBuffer: Trying to set a uniform Buffer at slot #" << slot << " which doesn't exist. MaxNumUniformBuffers = " << getMaxNumUniformBuffers();
+        return;
+    }
     BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
     GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
     GLsizeiptr rangeSize = batch._params[paramOffset + 0]._uint;
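
The new bounds check above rejects batches that target a uniform-buffer slot the implementation does not expose. The ceiling is driver-defined; a short sketch of querying it once at backend start-up, assuming a current GLES 3.x context (which guarantees at least 24 bindings):

    #include <GLES3/gl3.h>

    GLint queryMaxUniformBufferBindings() {
        GLint maxBindings = 0;
        // Implementation-defined limit; slots are validated against this value.
        glGetIntegerv(GL_MAX_UNIFORM_BUFFER_BINDINGS, &maxBindings);
        return maxBindings;
    }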
@@ -192,6 +200,9 @@ void GLBackend::releaseResourceTexture(uint32_t slot) {
 }

 void GLBackend::resetResourceStage() {
+    for (uint32_t i = 0; i < _resource._buffers.size(); i++) {
+        releaseResourceBuffer(i);
+    }
     for (uint32_t i = 0; i < _resource._textures.size(); i++) {
         releaseResourceTexture(i);
     }

@@ -230,7 +241,7 @@ void GLBackend::do_setResourceBuffer(const Batch& batch, size_t paramOffset) {
 void GLBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) {
     GLuint slot = batch._params[paramOffset + 1]._uint;
     if (slot >= (GLuint) MAX_NUM_RESOURCE_TEXTURES) {
-        // "GLBackend::do_setResourceTexture: Trying to set a resource Texture at slot #" + slot + " which doesn't exist. MaxNumResourceTextures = " + getMaxNumResourceTextures());
+        qCDebug(gpugllogging) << "GLBackend::do_setResourceTexture: Trying to set a resource Texture at slot #" << slot << " which doesn't exist. MaxNumResourceTextures = " << getMaxNumResourceTextures();
         return;
     }

@@ -260,7 +271,7 @@ void GLBackend::do_setResourceTexture(const Batch& batch, size_t paramOffset) {
         _resource._textures[slot] = resourceTexture;

-        _stats._RSAmountTextureMemoryBounded += object->size();
+        _stats._RSAmountTextureMemoryBounded += (int) object->size();

     } else {
         releaseResourceTexture(slot);

@@ -11,6 +11,8 @@
 #include "GLBackend.h"
 #include "GLState.h"

+#include <gpu/GPULogging.h>
+
 using namespace gpu;
 using namespace gpu::gl;

@@ -96,13 +98,11 @@ void GLBackend::do_setStateFrontFaceClockwise(bool isClockwise) {

 void GLBackend::do_setStateDepthClampEnable(bool enable) {
     if (_pipeline._stateCache.depthClampEnable != enable) {
-        if (enable) {
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthClampEnable GL_DEPTH_CLAMP";
-            //glEnable(GL_DEPTH_CLAMP);
-        } else {
-            //glDisable(GL_DEPTH_CLAMP);
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthClampEnable GL_DEPTH_CLAMP";
-        }
+        //if (enable) {
+        //    glEnable(GL_DEPTH_CLAMP);
+        //} else {
+        //    glDisable(GL_DEPTH_CLAMP);
+        //}
         (void)CHECK_GL_ERROR();

         _pipeline._stateCache.depthClampEnable = enable;

@@ -124,13 +124,11 @@ void GLBackend::do_setStateScissorEnable(bool enable) {

 void GLBackend::do_setStateMultisampleEnable(bool enable) {
     if (_pipeline._stateCache.multisampleEnable != enable) {
-        if (enable) {
-            //glEnable(GL_MULTISAMPLE);
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateMultisampleEnable GL_MULTISAMPLE";
-        } else {
-            //glDisable(GL_MULTISAMPLE);
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateMultisampleEnable GL_MULTISAMPLE";
-        }
+        //if (enable) {
+        //    glEnable(GL_MULTISAMPLE);
+        //} else {
+        //    glDisable(GL_MULTISAMPLE);
+        //}
         (void)CHECK_GL_ERROR();

         _pipeline._stateCache.multisampleEnable = enable;

@@ -139,13 +137,11 @@ void GLBackend::do_setStateMultisampleEnable(bool enable) {

 void GLBackend::do_setStateAntialiasedLineEnable(bool enable) {
     if (_pipeline._stateCache.antialisedLineEnable != enable) {
-        if (enable) {
-            //glEnable(GL_LINE_SMOOTH);
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateAntialiasedLineEnable GL_LINE_SMOOTH";
-        } else {
-            //glDisable(GL_LINE_SMOOTH);
-            qDebug() << "TODO: GLBackendState.cpp:do_setStateAntialiasedLineEnable GL_LINE_SMOOTH";
-        }
+        //if (enable) {
+        //    glEnable(GL_LINE_SMOOTH);
+        //} else {
+        //    glDisable(GL_LINE_SMOOTH);
+        //}
         (void)CHECK_GL_ERROR();

         _pipeline._stateCache.antialisedLineEnable = enable;

@@ -157,16 +153,12 @@ void GLBackend::do_setStateDepthBias(Vec2 bias) {
     if ((bias.x != 0.0f) || (bias.y != 0.0f)) {
         glEnable(GL_POLYGON_OFFSET_FILL);
         //glEnable(GL_POLYGON_OFFSET_LINE);
-        qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthBias GL_POLYGON_OFFSET_LINE";
         //glEnable(GL_POLYGON_OFFSET_POINT);
-        qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthBias GL_POLYGON_OFFSET_POINT";
         glPolygonOffset(bias.x, bias.y);
     } else {
         glDisable(GL_POLYGON_OFFSET_FILL);
         //glDisable(GL_POLYGON_OFFSET_LINE);
-        qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthBias GL_POLYGON_OFFSET_LINE";
         //glDisable(GL_POLYGON_OFFSET_POINT);
-        qDebug() << "TODO: GLBackendState.cpp:do_setStateDepthBias GL_POLYGON_OFFSET_POINT";
     }
     (void)CHECK_GL_ERROR();

@@ -190,7 +182,7 @@ void GLBackend::do_setStateDepthTest(State::DepthTest test) {
         glDepthFunc(COMPARISON_TO_GL[test.getFunction()]);
     }
     if (CHECK_GL_ERROR()) {
-        qDebug() << "DepthTest" << (test.isEnabled() ? "Enabled" : "Disabled")
+        qCDebug(gpulogging) << "DepthTest" << (test.isEnabled() ? "Enabled" : "Disabled")
             << "Mask=" << (test.getWriteMask() ? "Write" : "no Write")
             << "Func=" << test.getFunction()
             << "Raw=" << test.getRaw();

@@ -74,7 +74,6 @@ void GLBackend::do_generateTextureMips(const Batch& batch, size_t paramOffset) {

     // DO not transfer the texture, this call is expected for rendering texture
     GLTexture* object = syncGPUObject(resourceTexture);
-    qDebug() << "GLBackendTexture do_generateTextureMips syncGPUObject";
     if (!object) {
         return;
     }

@@ -15,7 +15,6 @@ using namespace gpu::gl;

 // Transform Stage
 void GLBackend::do_setModelTransform(const Batch& batch, size_t paramOffset) {
-    qDebug() << "do_setModelTransform";
 }

 void GLBackend::do_setViewTransform(const Batch& batch, size_t paramOffset) {

@@ -22,27 +22,26 @@ GLFramebuffer::~GLFramebuffer() {
 }

 bool GLFramebuffer::checkStatus() const {
-    bool result = false;
     switch (_status) {
         case GL_FRAMEBUFFER_COMPLETE:
             // Success !
-            result = true;
-            break;
+            return true;
         case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
-            qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
+            qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.";
             break;
         case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
-            qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
+            qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT.";
             break;
-        /* TODO: case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
-            qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
-            break;
-        case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
-            qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
-            break; */
+        //case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
+        //    qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER.";
+        //    break;
+        //case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
+        //    qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER.";
+        //    break;
         case GL_FRAMEBUFFER_UNSUPPORTED:
-            qCDebug(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
+            qCWarning(gpugllogging) << "GLFramebuffer::syncGPUObject : Framebuffer not valid, GL_FRAMEBUFFER_UNSUPPORTED.";
             break;
     }
-    return result;
+    return false;
 }
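
checkStatus() above now returns directly from the switch instead of carrying a local result, and the incompleteness reasons are logged at warning level. The _status it inspects is the value glCheckFramebufferStatus reports once attachments are in place; a minimal sketch of obtaining and acting on that status, assuming a current GLES context:

    #include <GLES3/gl3.h>

    bool isFramebufferComplete(GLuint fbo) {
        glBindFramebuffer(GL_FRAMEBUFFER, fbo);
        const GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        // Anything other than GL_FRAMEBUFFER_COMPLETE means the FBO cannot be rendered to;
        // the backend logs the specific enum and refuses to use the framebuffer.
        return status == GL_FRAMEBUFFER_COMPLETE;
    }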
@@ -51,10 +51,7 @@ GLPipeline* GLPipeline::sync(GLBackend& backend, const Pipeline& pipeline) {
     // Special case for view correction matrices, any pipeline that declares the correction buffer
     // uniform will automatically have it provided without any client code necessary.
     // Required for stable lighting in the HMD.
-    //CLIMAX_MERGE_START
-    //getbuffers() doesnt exist anymore.. use get uniformbuffers()?
     object->_cameraCorrection = shader->getUniformBuffers().findLocation("cameraCorrectionBuffer");
-    //CLIMAX_MERGE_END
     object->_program = programObject;
     object->_state = stateObject;

@@ -17,6 +17,7 @@
 Q_LOGGING_CATEGORY(gpugllogging, "hifi.gpu.gl")
 Q_LOGGING_CATEGORY(trace_render_gpu_gl, "trace.render.gpu.gl")
+Q_LOGGING_CATEGORY(trace_render_gpu_gl_detail, "trace.render.gpu.gl.detail")

 namespace gpu { namespace gl {

@@ -41,8 +42,11 @@ bool checkGLError(const char* name) {
         case GL_OUT_OF_MEMORY:
             qCWarning(gpugllogging) << "GLBackend" << name << ": There is not enough memory left to execute the command.The state of the GL is undefined, except for the state of the error flags, after this error is recorded.";
             break;
-        default:
-            qCWarning(gpugllogging) << "GLBackend" << name << ": Unknown error: " << error;
+        case GL_STACK_UNDERFLOW:
+            qCWarning(gpugllogging) << "GLBackend" << name << ": An attempt has been made to perform an operation that would cause an internal stack to underflow.";
+            break;
+        case GL_STACK_OVERFLOW:
+            qCWarning(gpugllogging) << "GLBackend" << name << ": An attempt has been made to perform an operation that would cause an internal stack to overflow.";
             break;
     }
     return true;
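
A side note on checkGLError: glGetError pops a single error flag per call and an implementation may have several recorded, so callers that want a clean slate typically drain the queue. A small sketch of that idiom, independent of the engine's CHECK_GL_ERROR macro (the function name below is illustrative):

    #include <GLES3/gl3.h>

    // Drain every pending error flag; returns true if at least one error was recorded.
    bool drainGLErrors(const char* where) {
        bool sawError = false;
        for (GLenum err = glGetError(); err != GL_NO_ERROR; err = glGetError()) {
            sawError = true;
            (void)where;   // a real implementation would log `where` and `err` here
        }
        return sawError;
    }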
@@ -178,21 +182,17 @@ State::BlendArg blendArgFromGL(GLenum blendArg) {

 void getCurrentGLState(State::Data& state) {
     {
-        GLint modes[2];
+        //GLint modes[2];
         //glGetIntegerv(GL_POLYGON_MODE, modes);
-        qDebug() << "TODO: GLShared.cpp:getCurrentGLState GL_POLYGON_MODE";
-        qDebug() << "TODO: GLShared.cpp:getCurrentGLState GL_FILL";
-        qDebug() << "TODO: GLShared.cpp:getCurrentGLState GL_LINE";
-        if (modes[0] == 0 /*GL_FILL*/) {
-            state.fillMode = State::FILL_FACE;
-        } else {
-            if (modes[0] == 0 /*GL_LINE*/) {
-                state.fillMode = State::FILL_LINE;
-            } else {
-                state.fillMode = State::FILL_POINT;
-            }
-        }
+        //if (modes[0] == GL_FILL) {
+        //    state.fillMode = State::FILL_FACE;
+        //} else {
+        //    if (modes[0] == GL_LINE) {
+        //        state.fillMode = State::FILL_LINE;
+        //    } else {
+        //        state.fillMode = State::FILL_POINT;
+        //    }
+        //}
     }
     {
         if (glIsEnabled(GL_CULL_FACE)) {

@@ -207,15 +207,10 @@ void getCurrentGLState(State::Data& state) {
         GLint winding;
         glGetIntegerv(GL_FRONT_FACE, &winding);
         state.frontFaceClockwise = (winding == GL_CW);
-        //state.depthClampEnable = glIsEnabled(GL_DEPTH_CLAMP);
-        qDebug() << "TODO: GLShared.cpp.cpp:getCurrentGLState GL_DEPTH_CLAMP";
+        state.depthClampEnable = false; //glIsEnabled(GL_DEPTH_CLAMP_EXT);
         state.scissorEnable = glIsEnabled(GL_SCISSOR_TEST);
-        //state.multisampleEnable = glIsEnabled(GL_MULTISAMPLE);
-        qDebug() << "TODO: GLShared.cpp.cpp:getCurrentGLState GL_MULTISAMPLE";
-
-        //state.antialisedLineEnable = glIsEnabled(GL_LINE_SMOOTH);
-        qDebug() << "TODO: GLShared.cpp.cpp:getCurrentGLState GL_LINE_SMOOTH";
+        state.multisampleEnable = false; //glIsEnabled(GL_MULTISAMPLE_EXT);
+        state.antialisedLineEnable = false; //glIsEnabled(GL_LINE_SMOOTH);
     }
     {
         if (glIsEnabled(GL_POLYGON_OFFSET_FILL)) {

@@ -16,6 +16,7 @@
 Q_DECLARE_LOGGING_CATEGORY(gpugllogging)
 Q_DECLARE_LOGGING_CATEGORY(trace_render_gpu_gl)
+Q_DECLARE_LOGGING_CATEGORY(trace_render_gpu_gl_detail)

 #define BUFFER_OFFSET(bytes) ((GLubyte*) nullptr + (bytes))

@@ -12,11 +12,52 @@ using namespace gpu;
 using namespace gpu::gl;

 bool GLTexelFormat::isCompressed() const {
+    switch (internalFormat) {
+        case GL_COMPRESSED_R11_EAC:
+        case GL_COMPRESSED_SIGNED_R11_EAC:
+        case GL_COMPRESSED_RG11_EAC:
+        case GL_COMPRESSED_SIGNED_RG11_EAC:
+        case GL_COMPRESSED_RGB8_ETC2:
+        case GL_COMPRESSED_SRGB8_ETC2:
+        case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+        case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+        case GL_COMPRESSED_RGBA8_ETC2_EAC:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+        case GL_COMPRESSED_RGBA_ASTC_4x4:
+        case GL_COMPRESSED_RGBA_ASTC_5x4:
+        case GL_COMPRESSED_RGBA_ASTC_5x5:
+        case GL_COMPRESSED_RGBA_ASTC_6x5:
+        case GL_COMPRESSED_RGBA_ASTC_6x6:
+        case GL_COMPRESSED_RGBA_ASTC_8x5:
+        case GL_COMPRESSED_RGBA_ASTC_8x6:
+        case GL_COMPRESSED_RGBA_ASTC_8x8:
+        case GL_COMPRESSED_RGBA_ASTC_10x5:
+        case GL_COMPRESSED_RGBA_ASTC_10x6:
+        case GL_COMPRESSED_RGBA_ASTC_10x8:
+        case GL_COMPRESSED_RGBA_ASTC_10x10:
+        case GL_COMPRESSED_RGBA_ASTC_12x10:
+        case GL_COMPRESSED_RGBA_ASTC_12x12:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10:
+        case GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12:
+            return true;
+        default:
             return false;
+    }
 }

 GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
-    // qDebug() << "GLTexelFormat::evalGLTexelFormatInternal " << dstFormat.getDimension() << ", " << dstFormat.getSemantic() << ", " << dstFormat.getType();
     GLenum result = GL_RGBA8;
     switch (dstFormat.getDimension()) {
         case gpu::SCALAR: {
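
The new isCompressed() above matters at upload time: compressed internal formats (the ETC2/EAC and ASTC families listed) must go through glCompressedTexSubImage2D with the stored byte size, while uncompressed formats use glTexSubImage2D with a format/type pair. A hedged sketch of how an uploader can branch on it, assuming a current GLES context (TexelFormatInfo is a stand-in for GLTexelFormat, and the sizes/pointers are placeholders):

    #include <GLES3/gl3.h>

    struct TexelFormatInfo {       // stand-in for GLTexelFormat
        GLenum internalFormat;
        GLenum format;
        GLenum type;
        bool compressed;           // what isCompressed() reports
    };

    void uploadMip(GLenum target, GLint level, GLsizei w, GLsizei h,
                   const TexelFormatInfo& texel, const void* data, GLsizei byteSize) {
        if (texel.compressed) {
            // Compressed data is opaque to GL: the driver needs the exact encoded size.
            glCompressedTexSubImage2D(target, level, 0, 0, w, h, texel.internalFormat, byteSize, data);
        } else {
            glTexSubImage2D(target, level, 0, 0, w, h, texel.format, texel.type, data);
        }
    }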
@ -48,6 +89,12 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
case gpu::INT16:
|
case gpu::INT16:
|
||||||
result = GL_R16I;
|
result = GL_R16I;
|
||||||
break;
|
break;
|
||||||
|
case gpu::NUINT16:
|
||||||
|
//result = GL_R16_EXT;
|
||||||
|
break;
|
||||||
|
case gpu::NINT16:
|
||||||
|
//result = GL_R16_SNORM_EXT;
|
||||||
|
break;
|
||||||
case gpu::HALF:
|
case gpu::HALF:
|
||||||
result = GL_R16F;
|
result = GL_R16F;
|
||||||
break;
|
break;
|
||||||
|
@ -59,8 +106,7 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
break;
|
break;
|
||||||
case gpu::NUINT8:
|
case gpu::NUINT8:
|
||||||
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
|
if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
|
||||||
//result = GL_SLUMINANCE8;
|
//result = GL_SLUMINANCE8_NV;
|
||||||
qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormatInternal GL_SLUMINANCE8";
|
|
||||||
} else {
|
} else {
|
||||||
result = GL_R8;
|
result = GL_R8;
|
||||||
}
|
}
|
||||||
|
@ -69,7 +115,6 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
result = GL_R8_SNORM;
|
result = GL_R8_SNORM;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormatInternal " << dstFormat.getType();
|
|
||||||
Q_UNREACHABLE();
|
Q_UNREACHABLE();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -85,6 +130,12 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
case gpu::DEPTH:
|
case gpu::DEPTH:
|
||||||
result = GL_DEPTH_COMPONENT16;
|
result = GL_DEPTH_COMPONENT16;
|
||||||
switch (dstFormat.getType()) {
|
switch (dstFormat.getType()) {
|
||||||
|
case gpu::UINT32:
|
||||||
|
case gpu::INT32:
|
||||||
|
case gpu::NUINT32:
|
||||||
|
case gpu::NINT32:
|
||||||
|
result = GL_DEPTH_COMPONENT32_OES;
|
||||||
|
break;
|
||||||
case gpu::FLOAT:
|
case gpu::FLOAT:
|
||||||
result = GL_DEPTH_COMPONENT32F;
|
result = GL_DEPTH_COMPONENT32F;
|
||||||
break;
|
break;
|
||||||
|
@ -112,7 +163,7 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
break;
|
break;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -125,7 +176,7 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
result = GL_RG8;
|
result = GL_RG8;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
@ -139,11 +190,10 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
break;
|
break;
|
||||||
case gpu::SRGB:
|
case gpu::SRGB:
|
||||||
case gpu::SRGBA:
|
case gpu::SRGBA:
|
||||||
//result = GL_SRGB8; // standard 2.2 gamma correction color
|
result = GL_SRGB8; // standard 2.2 gamma correction color
|
||||||
result = GL_RGB8; // standard 2.2 gamma correction color
|
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
|
|
||||||
break;
|
break;
|
||||||
|
@ -171,6 +221,12 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
case gpu::INT16:
|
case gpu::INT16:
|
||||||
result = GL_RGBA16I;
|
result = GL_RGBA16I;
|
||||||
break;
|
break;
|
||||||
|
case gpu::NUINT16:
|
||||||
|
//result = GL_RGBA16_EXT;
|
||||||
|
break;
|
||||||
|
case gpu::NINT16:
|
||||||
|
//result = GL_RGBA16_SNORM_EXT;
|
||||||
|
break;
|
||||||
case gpu::HALF:
|
case gpu::HALF:
|
||||||
result = GL_RGBA16F;
|
result = GL_RGBA16F;
|
||||||
break;
|
break;
|
||||||
|
@ -183,12 +239,13 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
case gpu::NUINT8:
|
case gpu::NUINT8:
|
||||||
result = GL_RGBA8;
|
result = GL_RGBA8;
|
||||||
break;
|
break;
|
||||||
|
case gpu::NUINT2:
|
||||||
|
//result = GL_RGBA2;
|
||||||
|
break;
|
||||||
case gpu::NINT8:
|
case gpu::NINT8:
|
||||||
result = GL_RGBA8_SNORM;
|
result = GL_RGBA8_SNORM;
|
||||||
break;
|
break;
|
||||||
case gpu::NUINT2:
|
case gpu::NINT2_10_10_10:
|
||||||
case gpu::NINT16:
|
|
||||||
case gpu::NUINT16:
|
|
||||||
case gpu::NUINT32:
|
case gpu::NUINT32:
|
||||||
case gpu::NINT32:
|
case gpu::NINT32:
|
||||||
case gpu::COMPRESSED:
|
case gpu::COMPRESSED:
|
||||||
|
@ -197,55 +254,21 @@ GLenum GLTexelFormat::evalGLTexelFormatInternal(const gpu::Element& dstFormat) {
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case gpu::SRGB:
|
case gpu::SRGB:
|
||||||
//result = GL_SRGB8;
|
result = GL_SRGB8;
|
||||||
result = GL_RGB8;
|
|
||||||
qDebug() << "SRGBA Here 2";
|
|
||||||
break;
|
break;
|
||||||
case gpu::SRGBA:
|
case gpu::SRGBA:
|
||||||
result = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
|
result = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
|
||||||
break;
|
break;
|
||||||
default:
|
|
||||||
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
// TODO: implement compression formats supported by android (ASTC, ETC2)
|
|
||||||
/*
|
|
||||||
case gpu::TILE4x4: {
|
|
||||||
switch (dstFormat.getSemantic()) {
|
|
||||||
case gpu::COMPRESSED_BC4_RED:
|
|
||||||
result = GL_COMPRESSED_RED_RGTC1;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC1_SRGB:
|
|
||||||
result = GL_COMPRESSED_SRGB_S3TC_DXT1_EXT;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC1_SRGBA:
|
|
||||||
result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC3_SRGBA:
|
|
||||||
result = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC5_XY:
|
|
||||||
result = GL_COMPRESSED_RG_RGTC2;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC6_RGB:
|
|
||||||
result = GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT;
|
|
||||||
break;
|
|
||||||
case gpu::COMPRESSED_BC7_SRGBA:
|
|
||||||
result = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
*/
|
}
|
||||||
default:
|
default:
|
||||||
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
qCDebug(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
|
|
||||||
//qDebug() << "GLTexelFormat::evalGLTexelFormatInternal result " << result;
|
//qDebug() << "GLTexelFormat::evalGLTexelFormatInternal result " << result;
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -260,21 +283,15 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
|
||||||
texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];
|
texel.type = ELEMENT_TYPE_TO_GL[dstFormat.getType()];
|
||||||
|
|
||||||
switch (dstFormat.getSemantic()) {
|
switch (dstFormat.getSemantic()) {
|
||||||
|
case gpu::RED:
|
||||||
case gpu::RGB:
|
case gpu::RGB:
|
||||||
case gpu::RGBA:
|
case gpu::RGBA:
|
||||||
texel.internalFormat = GL_R8;
|
texel.internalFormat = GL_R8;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
//CLIMAX_MERGE_START
|
|
||||||
// case gpu::COMPRESSED_R:
|
|
||||||
// qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_RED_RGTC1";
|
|
||||||
// //texel.internalFormat = GL_COMPRESSED_RED_RGTC1;
|
|
||||||
// break;
|
|
||||||
//CLIMAX_MERGE_END
|
|
||||||
|
|
||||||
case gpu::DEPTH:
|
case gpu::DEPTH:
|
||||||
texel.format = GL_DEPTH_COMPONENT;
|
texel.format = GL_DEPTH_COMPONENT;
|
||||||
texel.internalFormat = GL_DEPTH_COMPONENT32_OES;
|
texel.internalFormat = GL_DEPTH_COMPONENT32F;
|
||||||
break;
|
break;
|
||||||
case gpu::DEPTH_STENCIL:
|
case gpu::DEPTH_STENCIL:
|
||||||
texel.type = GL_UNSIGNED_INT_24_8;
|
texel.type = GL_UNSIGNED_INT_24_8;
|
||||||
|
@ -314,17 +331,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
|
||||||
case gpu::RGBA:
|
case gpu::RGBA:
|
||||||
texel.internalFormat = GL_RGB8;
|
texel.internalFormat = GL_RGB8;
|
||||||
break;
|
break;
|
||||||
//CLIMAX_MERGE_START
|
|
||||||
//not needed?
|
|
||||||
// case gpu::COMPRESSED_RGB:
|
|
||||||
// qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_RGB";
|
|
||||||
// //texel.internalFormat = GL_COMPRESSED_RGB;
|
|
||||||
// break;
|
|
||||||
// case gpu::COMPRESSED_SRGB:
|
|
||||||
// qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_SRGB";
|
|
||||||
// //texel.internalFormat = GL_COMPRESSED_SRGB;
|
|
||||||
// break;
|
|
||||||
//CLIMAX_MERGE_END
|
|
||||||
default:
|
default:
|
||||||
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
qCWarning(gpugllogging) << "Unknown combination of texel format";
|
||||||
}
|
}
|
||||||
|
@ -339,8 +345,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
|
||||||
switch (srcFormat.getSemantic()) {
|
switch (srcFormat.getSemantic()) {
|
||||||
case gpu::BGRA:
|
case gpu::BGRA:
|
||||||
case gpu::SBGRA:
|
case gpu::SBGRA:
|
||||||
qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_BGRA";
|
texel.format = GL_RGBA; // GL_BGRA_EXT;
|
||||||
//texel.format = GL_BGRA;
|
|
||||||
break;
|
break;
|
||||||
case gpu::RGB:
|
case gpu::RGB:
|
||||||
case gpu::RGBA:
|
case gpu::RGBA:
|
||||||
|
@@ -358,41 +363,11 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
             texel.internalFormat = GL_RGBA8;
             break;
         case gpu::SRGB:
-            //texel.internalFormat = GL_SRGB8;
-            texel.internalFormat = GL_RGB8;
-            qDebug() << "SRGBA Here 3";
+            texel.internalFormat = GL_SRGB8;
             break;
         case gpu::SRGBA:
             texel.internalFormat = GL_SRGB8_ALPHA8;
             break;
-
-            //CLIMAX_MERGE_START
-            //  case gpu::COMPRESSED_RGBA:
-            //      //texel.internalFormat = GL_COMPRESSED_RGBA;
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_RGBA";
-            //      break;
-            //  case gpu::COMPRESSED_SRGBA:
-            //      //texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA;
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_SRGB_ALPHA";
-            //      break;
-            //CLIMAX_MERGE_END
-            // FIXME: WE will want to support this later
-            /*
-            case gpu::COMPRESSED_BC3_RGBA:
-                texel.internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
-                break;
-            case gpu::COMPRESSED_BC3_SRGBA:
-                texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
-                break;
-
-            case gpu::COMPRESSED_BC7_RGBA:
-                texel.internalFormat = GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
-                break;
-            case gpu::COMPRESSED_BC7_SRGBA:
-                texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM;
-                break;
-            */
-
         default:
             qCWarning(gpugllogging) << "Unknown combination of texel format";
         }
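Restoring GL_SRGB8 and GL_SRGB8_ALPHA8 here matters because an sRGB internal format asks the GPU to decode texels to linear when sampling; the old GL_RGB8 substitution skipped that decode, so sRGB-authored textures would typically be lit as if they were already linear. A small sketch of the difference at upload time, under the same hypothetical loader assumption as the earlier sketch:

// Sketch only: same 8-bit pixels, different interpretation. With an sRGB
// internal format the hardware converts to linear on sampling, which is what
// a linear-lighting pipeline expects.
GLuint createColorTexture(GLsizei w, GLsizei h, const void* pixels, bool sRGB) {
    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    GLenum internalFormat = sRGB ? GL_SRGB8_ALPHA8 : GL_RGBA8;
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, w, h, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    glGenerateMipmap(GL_TEXTURE_2D);
    return tex;
}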
@@ -449,13 +424,11 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
             break;
         }
         case gpu::NUINT16: {
-            //texel.internalFormat = GL_R16;
-            qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_R16";
+            //texel.internalFormat = GL_R16_EXT;
             break;
         }
         case gpu::NINT16: {
-            //texel.internalFormat = GL_R16_SNORM;
-            qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_R16_SNORM";
+            //texel.internalFormat = GL_R16_SNORM_EXT;
             break;
         }
         case gpu::HALF: {
@@ -472,9 +445,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
         }
         case gpu::NUINT8: {
             if ((dstFormat.getSemantic() == gpu::SRGB || dstFormat.getSemantic() == gpu::SRGBA)) {
-                // texel.internalFormat = GL_SLUMINANCE8;
-                qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_SLUMINANCE8";
-
+                //texel.internalFormat = GL_SLUMINANCE8_NV;
             } else {
                 texel.internalFormat = GL_R8;
             }
@@ -495,7 +466,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
 
         case gpu::R11G11B10:
             texel.format = GL_RGB;
-            // the type should be float
+            texel.type = GL_UNSIGNED_INT_10F_11F_11F_REV;
             texel.internalFormat = GL_R11F_G11F_B10F;
             break;
 
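The R11G11B10 case now fills in the packed type GL_UNSIGNED_INT_10F_11F_11F_REV alongside GL_R11F_G11F_B10F. A sketch (hypothetical helper, same loader assumption) of allocating such a buffer with no client data, where the format/type pair only describes the absent source pixels:

// Small HDR color target using the packed 11/11/10 float format above.
GLuint createHdrColorTexture(GLsizei w, GLsizei h) {
    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_R11F_G11F_B10F, w, h, 0,
                 GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, nullptr);
    return tex;
}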
@@ -537,6 +508,7 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
         }
         case gpu::COMPRESSED:
         case gpu::NUINT2:
+        case gpu::NINT2_10_10_10:
         case gpu::NUM_TYPES: { // quiet compiler
             Q_UNREACHABLE();
         }
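gpu::NINT2_10_10_10 joins the other types that should never reach this mapping and fall into Q_UNREACHABLE(), which asserts in debug builds and acts as an optimizer hint in release builds. A generic illustration of that pattern, not code from this file:

#include <QtGlobal>

// Illustrative only: an exhaustive switch that routes impossible values
// into Q_UNREACHABLE(), as the backend does for unsupported gpu types.
int bytesPerChannel(int channelType) {   // 'channelType' stands in for gpu::Type
    switch (channelType) {
        case 0: return 1;   // e.g. an 8-bit type
        case 1: return 2;   // e.g. a 16-bit type
        default: Q_UNREACHABLE();
    }
}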
@@ -583,18 +555,8 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
             break;
         case gpu::SRGB:
         case gpu::SRGBA:
-            //texel.internalFormat = GL_SRGB8; // standard 2.2 gamma correction color
-            texel.internalFormat = GL_RGB8; // standard 2.2 gamma correction color
+            texel.internalFormat = GL_SRGB8; // standard 2.2 gamma correction color
             break;
-            //CLIMAX_MERGE_START
-            //  case gpu::COMPRESSED_RGB:
-            //      //texel.internalFormat = GL_COMPRESSED_RGB;
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_RGB";
-            //      break;
-            //  case gpu::COMPRESSED_SRGB:
-            //      //texel.internalFormat = GL_COMPRESSED_SRGB;
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_SRGB";
-            //      break;
         default:
             qCWarning(gpugllogging) << "Unknown combination of texel format";
         }
@@ -633,13 +595,11 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
             break;
         case gpu::NUINT16:
             texel.format = GL_RGBA;
-            //texel.internalFormat = GL_RGBA16;
-            qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_RGBA16";
+            //texel.internalFormat = GL_RGBA16_EXT;
             break;
         case gpu::NINT16:
             texel.format = GL_RGBA;
-            qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_RGBA16_SNORM";
-            //texel.internalFormat = GL_RGBA16_SNORM;
+            //texel.internalFormat = GL_RGBA16_SNORM_EXT;
             break;
         case gpu::HALF:
             texel.format = GL_RGBA;
@@ -661,30 +621,24 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
             texel.format = GL_RGBA;
             texel.internalFormat = GL_RGBA8_SNORM;
             break;
+        case gpu::NUINT2:
+            texel.format = GL_RGBA;
+            texel.internalFormat = GL_RGBA8;
+            break;
         case gpu::NUINT32:
         case gpu::NINT32:
+        case gpu::NINT2_10_10_10:
         case gpu::COMPRESSED:
-        case gpu::NUINT2:
         case gpu::NUM_TYPES: // quiet compiler
             Q_UNREACHABLE();
         }
         break;
         case gpu::SRGB:
-            //texel.internalFormat = GL_SRGB8;
-            texel.internalFormat = GL_RGB8; // standard 2.2 gamma correction color
+            texel.internalFormat = GL_SRGB8;
             break;
         case gpu::SRGBA:
             texel.internalFormat = GL_SRGB8_ALPHA8; // standard 2.2 gamma correction color
             break;
-            //CLIMAX_MERGE_START
-            //  case gpu::COMPRESSED_RGBA:
-            //      //texel.internalFormat = GL_COMPRESSED_RGBA;
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_RGBA";
-            //      break;
-            //  case gpu::COMPRESSED_SRGBA:
-            //      qDebug() << "TODO: GLTexelFormat.cpp:evalGLTexelFormat GL_COMPRESSED_SRGB_ALPHA";
-            //      //texel.internalFormat = GL_COMPRESSED_SRGB_ALPHA;
-            //      break;
         default:
             qCWarning(gpugllogging) << "Unknown combination of texel format";
         }
@@ -693,7 +647,6 @@ GLTexelFormat GLTexelFormat::evalGLTexelFormat(const Element& dstFormat, const E
     default:
         qCDebug(gpugllogging) << "Unknown combination of texel format";
     }
-    //qDebug() << "GLTexelFormat::evalGLTexelFormat Texel.type " << texel.type << " - texel.format=" << texel.format << " texel.internalFormat=" << texel.internalFormat;
     return texel;
     }
 }
@@ -28,10 +28,8 @@ const GLenum GLTexture::WRAP_MODES[Sampler::NUM_WRAP_MODES] = {
     GL_REPEAT, // WRAP_REPEAT,
     GL_MIRRORED_REPEAT, // WRAP_MIRROR,
     GL_CLAMP_TO_EDGE, // WRAP_CLAMP,
-    GL_CLAMP_TO_BORDER_EXT, // WRAP_BORDER,
-    //GL_MIRROR_CLAMP_TO_EDGE_EXT // WRAP_MIRROR_ONCE,
-    // qDebug() << "TODO: GLTexture.cpp:WRAP_MODES GL_MIRROR_CLAMP_TO_EDGE_EXT";
+    GL_CLAMP_TO_BORDER, // WRAP_BORDER,
+    GL_MIRRORED_REPEAT //GL_MIRROR_CLAMP_TO_EDGE_EXT // WRAP_MIRROR_ONCE,
 };
 
 const GLFilterMode GLTexture::FILTER_MODES[Sampler::NUM_FILTERS] = {
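In the wrap-mode table, WRAP_BORDER now maps to core GL_CLAMP_TO_BORDER instead of the extension token, and WRAP_MIRROR_ONCE falls back to GL_MIRRORED_REPEAT until GL_MIRROR_CLAMP_TO_EDGE support is wired up. A sketch of how such a table is typically applied to a sampler object (helper name hypothetical; same loader assumption as the earlier sketches):

// Configure one sampler with a wrap mode taken from a table like WRAP_MODES.
GLuint createSampler(GLenum wrapMode) {
    GLuint sampler = 0;
    glGenSamplers(1, &sampler);
    glSamplerParameteri(sampler, GL_TEXTURE_WRAP_S, wrapMode);
    glSamplerParameteri(sampler, GL_TEXTURE_WRAP_T, wrapMode);
    glSamplerParameteri(sampler, GL_TEXTURE_WRAP_R, wrapMode);
    if (wrapMode == GL_CLAMP_TO_BORDER) {
        // Border sampling needs an explicit border color.
        const GLfloat border[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
        glSamplerParameterfv(sampler, GL_TEXTURE_BORDER_COLOR, border);
    }
    return sampler;
}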