Splitting the code of GLBackend into separate .cpp files, adding the Pipeline and the State to gpu

Sam Gateau 2015-03-09 11:14:42 -07:00
parent 518d52b1c4
commit cc85f468d6
17 changed files with 803 additions and 464 deletions
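For context, a minimal, hypothetical sketch of how the new Pipeline object is meant to be driven from client code after this change, pieced together from the Batch, Pipeline, and Stage diffs below. The shader source strings are placeholders, and the draw call is omitted since its signature is not part of this commit:

// Hypothetical usage sketch (MySky_vert / MySky_frag are placeholder shader sources):
auto vs = gpu::ShaderPointer(gpu::Shader::createVertex(std::string(MySky_vert)));
auto ps = gpu::ShaderPointer(gpu::Shader::createPixel(std::string(MySky_frag)));
auto program = gpu::ShaderPointer(gpu::Shader::createProgram(vs, ps));

// A Pipeline bundles the program with its (still mostly empty) render states.
auto pipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, gpu::States()));

// Recording the pipeline into a batch caches the pointer and emits a setPipeline
// command; GLBackend::do_setPipeline / updatePipeline apply it lazily at draw time.
gpu::Batch batch;
batch.setPipeline(pipeline);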

View file

@ -25,7 +25,8 @@ Batch::Batch() :
_buffers(),
_textures(),
_streamFormats(),
_transforms()
_transforms(),
_pipelines()
{
}
@ -42,6 +43,7 @@ void Batch::clear() {
_textures.clear();
_streamFormats.clear();
_transforms.clear();
_pipelines.clear();
}
uint32 Batch::cacheResource(Resource* res) {
@ -159,6 +161,12 @@ void Batch::setProjectionTransform(const Mat4& proj) {
_params.push_back(cacheData(sizeof(Mat4), &proj));
}
void Batch::setPipeline(const PipelinePointer& pipeline) {
ADD_COMMAND(setPipeline);
_params.push_back(_pipelines.cache(pipeline));
}
void Batch::setUniformBuffer(uint32 slot, const BufferPointer& buffer, Offset offset, Offset size) {
ADD_COMMAND(setUniformBuffer);

View file

@ -21,6 +21,8 @@
#include "Stream.h"
#include "Texture.h"
#include "Pipeline.h"
#if defined(NSIGHT_FOUND)
#include "nvToolsExt.h"
class ProfileRange {
@ -96,7 +98,9 @@ public:
void setViewTransform(const Transform& view);
void setProjectionTransform(const Mat4& proj);
// Shader Stage
// Pipeline Stage
void setPipeline(const PipelinePointer& pipeline);
void setUniformBuffer(uint32 slot, const BufferPointer& buffer, Offset offset, Offset size);
void setUniformBuffer(uint32 slot, const BufferView& view); // not a command, just a shortcut from a BufferView
@ -164,6 +168,7 @@ public:
COMMAND_setViewTransform,
COMMAND_setProjectionTransform,
COMMAND_setPipeline,
COMMAND_setUniformBuffer,
COMMAND_setUniformTexture,
@ -281,6 +286,7 @@ public:
typedef Cache<TexturePointer>::Vector TextureCaches;
typedef Cache<Stream::FormatPointer>::Vector StreamFormatCaches;
typedef Cache<Transform>::Vector TransformCaches;
typedef Cache<PipelinePointer>::Vector PipelineCaches;
typedef unsigned char Byte;
typedef std::vector<Byte> Bytes;
@ -320,6 +326,7 @@ public:
TextureCaches _textures;
StreamFormatCaches _streamFormats;
TransformCaches _transforms;
PipelineCaches _pipelines;
protected:
};
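The _pipelines member above follows the same caching idiom as the other per-batch resources: Batch::setPipeline stores the pointer through Cache<PipelinePointer>::Vector::cache() and pushes the returned index as the command parameter, and the backend later resolves it with get(). The real Cache template is not shown in this diff; a simplified, hypothetical sketch of that idiom:

// Hypothetical, simplified sketch of the cache idiom (not the actual gpu::Batch::Cache):
#include <cstdint>
#include <vector>

template <typename T>
class ResourceCacheSketch {
public:
    // Recording side: remember the resource, hand back an index for the command stream.
    uint32_t cache(const T& data) {
        _items.push_back(data);
        return static_cast<uint32_t>(_items.size() - 1);
    }
    // Playback side: the backend turns the index back into the resource.
    const T& get(uint32_t offset) const {
        return _items[offset];
    }
    void clear() {
        _items.clear();
    }
private:
    std::vector<T> _items;
};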

View file

@ -16,6 +16,7 @@
#include "Resource.h"
#include "Texture.h"
#include "Shader.h"
#include "Pipeline.h"
namespace gpu {
@ -47,7 +48,6 @@ public:
template< typename T >
static void setGPUObject(const Buffer& buffer, T* bo) {
// buffer.setGPUObject(reinterpret_cast<GPUObject*>(bo));
buffer.setGPUObject(bo);
}
template< typename T >
@ -55,30 +55,32 @@ public:
return reinterpret_cast<T*>(buffer.getGPUObject());
}
//void syncGPUObject(const Buffer& buffer);
template< typename T >
static void setGPUObject(const Texture& texture, T* to) {
texture.setGPUObject(reinterpret_cast<GPUObject*>(to));
texture.setGPUObject(to);
}
template< typename T >
static T* getGPUObject(const Texture& texture) {
return reinterpret_cast<T*>(texture.getGPUObject());
}
//void syncGPUObject(const Texture& texture);
template< typename T >
static void setGPUObject(const Shader& shader, T* so) {
shader.setGPUObject(reinterpret_cast<GPUObject*>(so));
shader.setGPUObject(so);
}
template< typename T >
static T* getGPUObject(const Shader& shader) {
return reinterpret_cast<T*>(shader.getGPUObject());
}
// void syncGPUObject(const Shader& shader);
template< typename T >
static void setGPUObject(const Pipeline& pipeline, T* po) {
pipeline.setGPUObject(po);
}
template< typename T >
static T* getGPUObject(const Pipeline& pipeline) {
return reinterpret_cast<T*>(pipeline.getGPUObject());
}
protected:

View file

@ -25,6 +25,7 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
(&::gpu::GLBackend::do_setViewTransform),
(&::gpu::GLBackend::do_setProjectionTransform),
(&::gpu::GLBackend::do_setPipeline),
(&::gpu::GLBackend::do_setUniformBuffer),
(&::gpu::GLBackend::do_setUniformTexture),
@ -71,7 +72,8 @@ GLBackend::CommandCall GLBackend::_commandCalls[Batch::NUM_COMMANDS] =
GLBackend::GLBackend() :
_input(),
_transform()
_transform(),
_pipeline()
{
initTransform();
}
@ -134,6 +136,7 @@ void GLBackend::checkGLError() {
void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
updateInput();
updateTransform();
updatePipeline();
Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
GLenum mode = _primitiveToGLmode[primitiveType];
@ -147,6 +150,7 @@ void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
updateInput();
updateTransform();
updatePipeline();
Primitive primitiveType = (Primitive)batch._params[paramOffset + 2]._uint;
GLenum mode = _primitiveToGLmode[primitiveType];
@ -167,389 +171,6 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
CHECK_GL_ERROR();
}
void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);
if (format != _input._format) {
_input._format = format;
_input._invalidFormat = true;
}
}
void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
Offset stride = batch._params[paramOffset + 0]._uint;
Offset offset = batch._params[paramOffset + 1]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
uint32 channel = batch._params[paramOffset + 3]._uint;
if (channel < getNumInputBuffers()) {
_input._buffers[channel] = buffer;
_input._bufferOffsets[channel] = offset;
_input._bufferStrides[channel] = stride;
_input._buffersState.set(channel);
}
}
#define SUPPORT_LEGACY_OPENGL
#if defined(SUPPORT_LEGACY_OPENGL)
static const int NUM_CLASSIC_ATTRIBS = Stream::TANGENT;
static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
GL_VERTEX_ARRAY,
GL_NORMAL_ARRAY,
GL_COLOR_ARRAY,
GL_TEXTURE_COORD_ARRAY
};
#endif
void GLBackend::updateInput() {
if (_input._invalidFormat || _input._buffersState.any()) {
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;
// Check expected activation
if (_input._format) {
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const Stream::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);
}
}
// Reconcile activation: what was enabled versus what is expected now
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {
glEnableClientState(attributeSlotToClassicAttribName[i]);
}
else {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
} else {
#else
{
#endif
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
}
CHECK_GL_ERROR();
_input._attributeActivation.flip(i);
}
}
}
// now we need to bind the buffers and assign the attrib pointers
if (_input._format) {
const Buffers& buffers = _input._buffers;
const Offsets& offsets = _input._bufferOffsets;
const Offsets& strides = _input._bufferStrides;
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::ChannelMap::const_iterator channelIt = _input._format->getChannels().begin();
channelIt != _input._format->getChannels().end();
channelIt++) {
const Stream::Format::ChannelMap::value_type::second_type& channel = (*channelIt).second;
if ((*channelIt).first < buffers.size()) {
int bufferNum = (*channelIt).first;
if (_input._buffersState.test(bufferNum) || _input._invalidFormat) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
glBindBuffer(GL_ARRAY_BUFFER, vbo);
CHECK_GL_ERROR();
_input._buffersState[bufferNum] = false;
for (unsigned int i = 0; i < channel._slots.size(); i++) {
const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
GLuint slot = attrib._slot;
GLuint count = attrib._element.getDimensionCount();
GLenum type = _elementTypeToGLType[attrib._element.getType()];
GLuint stride = strides[bufferNum];
GLuint pointer = attrib._offset + offsets[bufferNum];
#if defined(SUPPORT_LEGACY_OPENGL)
if (slot < NUM_CLASSIC_ATTRIBS) {
switch (slot) {
case Stream::POSITION:
glVertexPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::NORMAL:
glNormalPointer(type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::COLOR:
glColorPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::TEXCOORD:
glTexCoordPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
};
} else {
#else
{
#endif
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer));
}
CHECK_GL_ERROR();
}
}
}
}
}
// everything format related should be in sync now
_input._invalidFormat = false;
}
/* TODO: Fancy version GL4.4
if (_needInputFormatUpdate) {
InputActivationCache newActivation;
// Assign the vertex format required
if (_inputFormat) {
const StreamFormat::AttributeMap& attributes = _inputFormat->getAttributes();
for (StreamFormat::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const StreamFormat::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);
glVertexAttribFormat(
attrib._slot,
attrib._element.getDimensionCount(),
_elementTypeToGLType[attrib._element.getType()],
attrib._element.isNormalized(),
attrib._stride);
}
CHECK_GL_ERROR();
}
// Reconcile activation: what was enabled versus what is expected now
for (int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _inputAttributeActivation[i]) {
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
_inputAttributeActivation.flip(i);
}
}
CHECK_GL_ERROR();
_needInputFormatUpdate = false;
}
if (_needInputStreamUpdate) {
if (_inputStream) {
const Stream::Buffers& buffers = _inputStream->getBuffers();
const Stream::Offsets& offsets = _inputStream->getOffsets();
const Stream::Strides& strides = _inputStream->getStrides();
for (int i = 0; i < buffers.size(); i++) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[i]));
glBindVertexBuffer(i, vbo, offsets[i], strides[i]);
}
CHECK_GL_ERROR();
}
_needInputStreamUpdate = false;
}
*/
}
void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
_input._indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
BufferPointer indexBuffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
_input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
_input._indexBuffer = indexBuffer;
if (indexBuffer) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, getBufferID(*indexBuffer));
} else {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
CHECK_GL_ERROR();
}
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
_transform._model = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidModel = true;
}
void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidView = true;
}
void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
memcpy(&_transform._projection, batch.editData(batch._params[paramOffset]._uint), sizeof(Mat4));
_transform._invalidProj = true;
}
void GLBackend::initTransform() {
#if defined(Q_OS_WIN)
glGenBuffers(1, &_transform._transformObjectBuffer);
glGenBuffers(1, &_transform._transformCameraBuffer);
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformObjectBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformObject), (const void*) &_transform._transformObject, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
#else
#endif
}
void GLBackend::killTransform() {
#if defined(Q_OS_WIN)
glDeleteBuffers(1, &_transform._transformObjectBuffer);
glDeleteBuffers(1, &_transform._transformCameraBuffer);
#else
#endif
}
void GLBackend::updateTransform() {
// Check all the dirty flags and update the state accordingly
if (_transform._invalidProj) {
_transform._transformCamera._projection = _transform._projection;
}
if (_transform._invalidView) {
_transform._view.getInverseMatrix(_transform._transformCamera._view);
_transform._view.getMatrix(_transform._transformCamera._viewInverse);
}
if (_transform._invalidModel) {
_transform._model.getMatrix(_transform._transformObject._model);
_transform._model.getInverseMatrix(_transform._transformObject._modelInverse);
}
if (_transform._invalidView || _transform._invalidProj) {
Mat4 viewUntranslated = _transform._transformCamera._view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_transform._transformCamera._projectionViewUntranslated = _transform._transformCamera._projection * viewUntranslated;
}
if (_transform._invalidView || _transform._invalidProj) {
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERROR();
#endif
}
if (_transform._invalidModel) {
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformObjectBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(_transform._transformObject), (const void*) &_transform._transformObject, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERROR();
#endif
}
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._transformObjectBuffer);
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, _transform._transformCameraBuffer);
CHECK_GL_ERROR();
#endif
#if defined(Q_OS_MAC) || defined(Q_OS_LINUX)
// Do it again for fixed pipeline until we can get rid of it
if (_transform._invalidProj) {
if (_transform._lastMode != GL_PROJECTION) {
glMatrixMode(GL_PROJECTION);
_transform._lastMode = GL_PROJECTION;
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&_transform._projection));
CHECK_GL_ERROR();
}
if (_transform._invalidModel || _transform._invalidView) {
if (!_transform._model.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
if (!_transform._view.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?
// glLoadIdentity();
}
}
CHECK_GL_ERROR();
}
#endif
// Flags are clean
_transform._invalidView = _transform._invalidProj = _transform._invalidModel = false;
}
void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
GLuint slot = batch._params[paramOffset + 3]._uint;
BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
GLsizeiptr rangeSize = batch._params[paramOffset + 0]._uint;
#if defined(Q_OS_MAC)
GLfloat* data = (GLfloat*) (uniformBuffer->getData() + rangeStart);
glUniform4fv(slot, rangeSize / sizeof(GLfloat[4]), data);
// NOT working, so we'll stick to the uniform float array until we move to core profile
// GLuint bo = getBufferID(*uniformBuffer);
//glUniformBufferEXT(_shader._program, slot, bo);
#elif defined(Q_OS_WIN)
GLuint bo = getBufferID(*uniformBuffer);
glBindBufferRange(GL_UNIFORM_BUFFER, slot, bo, rangeStart, rangeSize);
#else
GLfloat* data = (GLfloat*) (uniformBuffer->getData() + rangeStart);
glUniform4fv(slot, rangeSize / sizeof(GLfloat[4]), data);
#endif
CHECK_GL_ERROR();
}
void GLBackend::do_setUniformTexture(Batch& batch, uint32 paramOffset) {
GLuint slot = batch._params[paramOffset + 1]._uint;
TexturePointer uniformTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
GLuint to = getTextureID(uniformTexture);
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, to);
CHECK_GL_ERROR();
}
// TODO: As long as we have gl calls explicitly issued from interface
// code, we need to be able to record and batch these calls. The long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
@ -740,8 +361,10 @@ void Batch::_glUseProgram(GLuint program) {
}
void GLBackend::do_glUseProgram(Batch& batch, uint32 paramOffset) {
_shader._program = batch._params[paramOffset]._uint;
glUseProgram(_shader._program);
_pipeline._program = batch._params[paramOffset]._uint;
// for this call we still want to execute the glUseProgram in the order of the GL command to avoid any issue
_pipeline._invalidProgram = false;
glUseProgram(_pipeline._program);
CHECK_GL_ERROR();
}
@ -998,55 +621,3 @@ void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
CHECK_GL_ERROR();
}
GLBackend::GLBuffer::GLBuffer() :
_stamp(0),
_buffer(0),
_size(0)
{}
GLBackend::GLBuffer::~GLBuffer() {
if (_buffer != 0) {
glDeleteBuffers(1, &_buffer);
}
}
GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
GLBuffer* object = Backend::getGPUObject<GLBackend::GLBuffer>(buffer);
if (object && (object->_stamp == buffer.getSysmem().getStamp())) {
return object;
}
// need to have a gpu object?
if (!object) {
object = new GLBuffer();
glGenBuffers(1, &object->_buffer);
CHECK_GL_ERROR();
Backend::setGPUObject(buffer, object);
}
// Now let's update the content of the bo with the sysmem version
// TODO: in the future, be smarter about when to actually upload the glBO version based on the data that did change
//if () {
glBindBuffer(GL_ARRAY_BUFFER, object->_buffer);
glBufferData(GL_ARRAY_BUFFER, buffer.getSysmem().getSize(), buffer.getSysmem().readData(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
object->_stamp = buffer.getSysmem().getStamp();
object->_size = buffer.getSysmem().getSize();
//}
CHECK_GL_ERROR();
return object;
}
GLuint GLBackend::getBufferID(const Buffer& buffer) {
GLBuffer* bo = GLBackend::syncGPUObject(buffer);
if (bo) {
return bo->_buffer;
} else {
return 0;
}
}

View file

@ -75,6 +75,16 @@ public:
static GLShader* syncGPUObject(const Shader& shader);
static GLuint getShaderID(const ShaderPointer& shader);
class GLPipeline : public GPUObject {
public:
GLShader* _program;
GLPipeline();
~GLPipeline();
};
static GLPipeline* syncGPUObject(const Pipeline& pipeline);
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
static const int MAX_NUM_INPUT_BUFFERS = 16;
@ -160,18 +170,24 @@ protected:
_lastMode(GL_TEXTURE) {}
} _transform;
// Shader Stage
// Pipeline Stage
void do_setPipeline(Batch& batch, uint32 paramOffset);
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
void do_setUniformTexture(Batch& batch, uint32 paramOffset);
void updateShader();
struct ShaderStageState {
void updatePipeline();
struct PipelineStageState {
PipelinePointer _pipeline;
GLuint _program;
bool _invalidProgram;
ShaderStageState() :
_program(0) {}
} _shader;
PipelineStageState() :
_pipeline(),
_program(0),
_invalidProgram(false)
{}
} _pipeline;
// TODO: As long as we have gl calls explicitly issued from interface

View file

@ -0,0 +1,66 @@
//
// GLBackendBuffer.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"
using namespace gpu;
GLBackend::GLBuffer::GLBuffer() :
_stamp(0),
_buffer(0),
_size(0)
{}
GLBackend::GLBuffer::~GLBuffer() {
if (_buffer != 0) {
glDeleteBuffers(1, &_buffer);
}
}
GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
GLBuffer* object = Backend::getGPUObject<GLBackend::GLBuffer>(buffer);
if (object && (object->_stamp == buffer.getSysmem().getStamp())) {
return object;
}
// need to have a gpu object?
if (!object) {
object = new GLBuffer();
glGenBuffers(1, &object->_buffer);
CHECK_GL_ERROR();
Backend::setGPUObject(buffer, object);
}
// Now let's update the content of the bo with the sysmem version
// TODO: in the future, be smarter about when to actually upload the glBO version based on the data that did change
//if () {
glBindBuffer(GL_ARRAY_BUFFER, object->_buffer);
glBufferData(GL_ARRAY_BUFFER, buffer.getSysmem().getSize(), buffer.getSysmem().readData(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
object->_stamp = buffer.getSysmem().getStamp();
object->_size = buffer.getSysmem().getSize();
//}
CHECK_GL_ERROR();
return object;
}
GLuint GLBackend::getBufferID(const Buffer& buffer) {
GLBuffer* bo = GLBackend::syncGPUObject(buffer);
if (bo) {
return bo->_buffer;
} else {
return 0;
}
}

View file

@ -0,0 +1,223 @@
//
// GLBackendInput.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"
using namespace gpu;
void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);
if (format != _input._format) {
_input._format = format;
_input._invalidFormat = true;
}
}
void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
Offset stride = batch._params[paramOffset + 0]._uint;
Offset offset = batch._params[paramOffset + 1]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
uint32 channel = batch._params[paramOffset + 3]._uint;
if (channel < getNumInputBuffers()) {
_input._buffers[channel] = buffer;
_input._bufferOffsets[channel] = offset;
_input._bufferStrides[channel] = stride;
_input._buffersState.set(channel);
}
}
#define SUPPORT_LEGACY_OPENGL
#if defined(SUPPORT_LEGACY_OPENGL)
static const int NUM_CLASSIC_ATTRIBS = Stream::TANGENT;
static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
GL_VERTEX_ARRAY,
GL_NORMAL_ARRAY,
GL_COLOR_ARRAY,
GL_TEXTURE_COORD_ARRAY
};
#endif
void GLBackend::updateInput() {
if (_input._invalidFormat || _input._buffersState.any()) {
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;
// Check expected activation
if (_input._format) {
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const Stream::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);
}
}
// Reconcile activation: what was enabled versus what is expected now
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {
glEnableClientState(attributeSlotToClassicAttribName[i]);
}
else {
glDisableClientState(attributeSlotToClassicAttribName[i]);
}
} else {
#else
{
#endif
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
}
CHECK_GL_ERROR();
_input._attributeActivation.flip(i);
}
}
}
// now we need to bind the buffers and assign the attrib pointers
if (_input._format) {
const Buffers& buffers = _input._buffers;
const Offsets& offsets = _input._bufferOffsets;
const Offsets& strides = _input._bufferStrides;
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::ChannelMap::const_iterator channelIt = _input._format->getChannels().begin();
channelIt != _input._format->getChannels().end();
channelIt++) {
const Stream::Format::ChannelMap::value_type::second_type& channel = (*channelIt).second;
if ((*channelIt).first < buffers.size()) {
int bufferNum = (*channelIt).first;
if (_input._buffersState.test(bufferNum) || _input._invalidFormat) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
glBindBuffer(GL_ARRAY_BUFFER, vbo);
CHECK_GL_ERROR();
_input._buffersState[bufferNum] = false;
for (unsigned int i = 0; i < channel._slots.size(); i++) {
const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
GLuint slot = attrib._slot;
GLuint count = attrib._element.getDimensionCount();
GLenum type = _elementTypeToGLType[attrib._element.getType()];
GLuint stride = strides[bufferNum];
GLuint pointer = attrib._offset + offsets[bufferNum];
#if defined(SUPPORT_LEGACY_OPENGL)
if (slot < NUM_CLASSIC_ATTRIBS) {
switch (slot) {
case Stream::POSITION:
glVertexPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::NORMAL:
glNormalPointer(type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::COLOR:
glColorPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
case Stream::TEXCOORD:
glTexCoordPointer(count, type, stride, reinterpret_cast<GLvoid*>(pointer));
break;
};
} else {
#else
{
#endif
GLboolean isNormalized = attrib._element.isNormalized();
glVertexAttribPointer(slot, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer));
}
CHECK_GL_ERROR();
}
}
}
}
}
// everything format related should be in sync now
_input._invalidFormat = false;
}
/* TODO: Fancy version GL4.4
if (_needInputFormatUpdate) {
InputActivationCache newActivation;
// Assign the vertex format required
if (_inputFormat) {
const StreamFormat::AttributeMap& attributes = _inputFormat->getAttributes();
for (StreamFormat::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const StreamFormat::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);
glVertexAttribFormat(
attrib._slot,
attrib._element.getDimensionCount(),
_elementTypeToGLType[attrib._element.getType()],
attrib._element.isNormalized(),
attrib._stride);
}
CHECK_GL_ERROR();
}
// Reconcile activation: what was enabled versus what is expected now
for (int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _inputAttributeActivation[i]) {
if (newState) {
glEnableVertexAttribArray(i);
} else {
glDisableVertexAttribArray(i);
}
_inputAttributeActivation.flip(i);
}
}
CHECK_GL_ERROR();
_needInputFormatUpdate = false;
}
if (_needInputStreamUpdate) {
if (_inputStream) {
const Stream::Buffers& buffers = _inputStream->getBuffers();
const Stream::Offsets& offsets = _inputStream->getOffsets();
const Stream::Strides& strides = _inputStream->getStrides();
for (int i = 0; i < buffers.size(); i++) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[i]));
glBindVertexBuffer(i, vbo, offsets[i], strides[i]);
}
CHECK_GL_ERROR();
}
_needInputStreamUpdate = false;
}
*/
}
void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
_input._indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
BufferPointer indexBuffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
_input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
_input._indexBuffer = indexBuffer;
if (indexBuffer) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, getBufferID(*indexBuffer));
} else {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
CHECK_GL_ERROR();
}

View file

@ -0,0 +1,95 @@
//
// GLBackendPipeline.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"
#include "Format.h"
using namespace gpu;
GLBackend::GLPipeline::GLPipeline() :
_program(nullptr)
{}
GLBackend::GLPipeline::~GLPipeline() {
_program = nullptr;
}
GLBackend::GLPipeline* GLBackend::syncGPUObject(const Pipeline& pipeline) {
GLPipeline* object = Backend::getGPUObject<GLBackend::GLPipeline>(pipeline);
// If the GPU object has already been created, we are good
if (object) {
return object;
}
return nullptr;
}
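Note that syncGPUObject(const Pipeline&) above only returns an already-attached GPU object and never creates one, so do_setPipeline below will currently always early-out. A hedged sketch of what the missing creation branch might look like, assuming it reuses the existing GLShader sync path; this is not part of the commit:

// Hypothetical creation branch (names and behavior are assumptions, not this commit's code):
GLBackend::GLPipeline* createGLPipelineSketch(const Pipeline& pipeline) {
    // Make sure the program has a backend object first.
    GLBackend::GLShader* shaderObject = GLBackend::syncGPUObject(*pipeline.getProgram());
    if (!shaderObject) {
        return nullptr;
    }
    GLBackend::GLPipeline* object = new GLBackend::GLPipeline();
    object->_program = shaderObject;
    Backend::setGPUObject(pipeline, object);
    return object;
}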
void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
PipelinePointer pipeline = batch._pipelines.get(batch._params[paramOffset + 0]._uint);
if (pipeline == _pipeline._pipeline) {
return;
}
auto pipelineObject = syncGPUObject((*pipeline));
if (!pipelineObject) {
return;
}
_pipeline._pipeline = pipeline;
_pipeline._program = pipelineObject->_program->_program;
_pipeline._invalidProgram = true;
}
void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
GLuint slot = batch._params[paramOffset + 3]._uint;
BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
GLsizeiptr rangeSize = batch._params[paramOffset + 0]._uint;
#if defined(Q_OS_MAC)
GLfloat* data = (GLfloat*) (uniformBuffer->getData() + rangeStart);
glUniform4fv(slot, rangeSize / sizeof(GLfloat[4]), data);
// NOT working, so we'll stick to the uniform float array until we move to core profile
// GLuint bo = getBufferID(*uniformBuffer);
//glUniformBufferEXT(_shader._program, slot, bo);
#elif defined(Q_OS_WIN)
GLuint bo = getBufferID(*uniformBuffer);
glBindBufferRange(GL_UNIFORM_BUFFER, slot, bo, rangeStart, rangeSize);
#else
GLfloat* data = (GLfloat*) (uniformBuffer->getData() + rangeStart);
glUniform4fv(slot, rangeSize / sizeof(GLfloat[4]), data);
#endif
CHECK_GL_ERROR();
}
void GLBackend::do_setUniformTexture(Batch& batch, uint32 paramOffset) {
GLuint slot = batch._params[paramOffset + 1]._uint;
TexturePointer uniformTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);
GLuint to = getTextureID(uniformTexture);
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, to);
CHECK_GL_ERROR();
}
void GLBackend::updatePipeline() {
if (_pipeline._invalidProgram) {
glUseProgram(_pipeline._program);
CHECK_GL_ERROR();
// the program is now bound; clear the dirty flag so we don't rebind every draw
_pipeline._invalidProgram = false;
}
}

View file

@ -672,3 +672,4 @@ bool GLBackend::makeProgram(Shader& shader, const Shader::BindingSet& slotBindin
return true;
}

View file

@ -0,0 +1,156 @@
//
// GLBackendTransform.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "GLBackendShared.h"
#include "Format.h"
using namespace gpu;
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
_transform._model = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidModel = true;
}
void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidView = true;
}
void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
memcpy(&_transform._projection, batch.editData(batch._params[paramOffset]._uint), sizeof(Mat4));
_transform._invalidProj = true;
}
void GLBackend::initTransform() {
#if defined(Q_OS_WIN)
glGenBuffers(1, &_transform._transformObjectBuffer);
glGenBuffers(1, &_transform._transformCameraBuffer);
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformObjectBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformObject), (const void*) &_transform._transformObject, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_UNIFORM_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
#else
#endif
}
void GLBackend::killTransform() {
#if defined(Q_OS_WIN)
glDeleteBuffers(1, &_transform._transformObjectBuffer);
glDeleteBuffers(1, &_transform._transformCameraBuffer);
#else
#endif
}
void GLBackend::updateTransform() {
// Check all the dirty flags and update the state accordingly
if (_transform._invalidProj) {
_transform._transformCamera._projection = _transform._projection;
}
if (_transform._invalidView) {
_transform._view.getInverseMatrix(_transform._transformCamera._view);
_transform._view.getMatrix(_transform._transformCamera._viewInverse);
}
if (_transform._invalidModel) {
_transform._model.getMatrix(_transform._transformObject._model);
_transform._model.getInverseMatrix(_transform._transformObject._modelInverse);
}
if (_transform._invalidView || _transform._invalidProj) {
Mat4 viewUntranslated = _transform._transformCamera._view;
viewUntranslated[3] = Vec4(0.0f, 0.0f, 0.0f, 1.0f);
_transform._transformCamera._projectionViewUntranslated = _transform._transformCamera._projection * viewUntranslated;
}
if (_transform._invalidView || _transform._invalidProj) {
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformCameraBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(_transform._transformCamera), (const void*) &_transform._transformCamera, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERROR();
#endif
}
if (_transform._invalidModel) {
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, 0);
glBindBuffer(GL_ARRAY_BUFFER, _transform._transformObjectBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(_transform._transformObject), (const void*) &_transform._transformObject, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CHECK_GL_ERROR();
#endif
}
#if defined(Q_OS_WIN)
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT, _transform._transformObjectBuffer);
glBindBufferBase(GL_UNIFORM_BUFFER, TRANSFORM_CAMERA_SLOT, _transform._transformCameraBuffer);
CHECK_GL_ERROR();
#endif
#if defined(Q_OS_MAC) || defined(Q_OS_LINUX)
// Do it again for fixed pipeline until we can get rid of it
if (_transform._invalidProj) {
if (_transform._lastMode != GL_PROJECTION) {
glMatrixMode(GL_PROJECTION);
_transform._lastMode = GL_PROJECTION;
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&_transform._projection));
CHECK_GL_ERROR();
}
if (_transform._invalidModel || _transform._invalidView) {
if (!_transform._model.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
if (!_transform._view.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?
// glLoadIdentity();
}
}
CHECK_GL_ERROR();
}
#endif
// Flags are clean
_transform._invalidView = _transform._invalidProj = _transform._invalidModel = false;
}

View file

@ -0,0 +1,34 @@
//
// Pipeline.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "Pipeline.h"
#include <math.h>
#include <QDebug>
using namespace gpu;
Pipeline::Pipeline():
_program(),
_states()
{
}
Pipeline::~Pipeline()
{
}
Pipeline* Pipeline::create(const ShaderPointer& program, const States& states) {
Pipeline* pipeline = new Pipeline();
pipeline->_program = program;
pipeline->_states = states;
return pipeline;
}

View file

@ -0,0 +1,53 @@
//
// Pipeline.h
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_Pipeline_h
#define hifi_gpu_Pipeline_h
#include "Resource.h"
#include <memory>
#include <set>
#include "Shader.h"
#include "State.h"
namespace gpu {
class Pipeline {
public:
static Pipeline* create(const ShaderPointer& program, const States& states);
~Pipeline();
const ShaderPointer& getProgram() const { return _program; }
const States& getStates() const { return _states; }
protected:
ShaderPointer _program;
States _states;
Pipeline();
Pipeline(const Pipeline& pipeline); // deep copy of the pipeline
Pipeline& operator=(const Pipeline& pipeline); // deep copy of the pipeline
// This shouldn't be used by anything else than the Backend class with the proper casting.
mutable GPUObject* _gpuObject = NULL;
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
GPUObject* getGPUObject() const { return _gpuObject; }
friend class Backend;
};
typedef QSharedPointer< Pipeline > PipelinePointer;
typedef std::vector< PipelinePointer > Pipelines;
};
#endif

20
libraries/gpu/src/gpu/State.cpp Executable file
View file

@ -0,0 +1,20 @@
//
// State.cpp
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "State.h"
#include <QDebug>
using namespace gpu;
State::~State()
{
}

88
libraries/gpu/src/gpu/State.h Executable file
View file

@ -0,0 +1,88 @@
//
// State.h
// libraries/gpu/src/gpu
//
// Created by Sam Gateau on 3/8/2015.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_gpu_State_h
#define hifi_gpu_State_h
#include "Format.h"
#include <vector>
#include <QSharedPointer>
namespace gpu {
class GPUObject;
class State {
public:
State() {}
virtual ~State();
// Work in progress, not used
/*
enum Field {
FILL_MODE,
CULL_MODE,
DEPTH_BIAS,
DEPTH_BIAS_CLAMP,
DEPTH_BIASSLOPE_SCALE,
FRONT_CLOCKWISE,
DEPTH_CLIP_ENABLE,
SCISSR_ENABLE,
MULTISAMPLE_ENABLE,
ANTIALISED_LINE_ENABLE,
DEPTH_ENABLE,
DEPTH_WRITE_MASK,
DEPTH_FUNCTION,
STENCIL_ENABLE,
STENCIL_READ_MASK,
STENCIL_WRITE_MASK,
STENCIL_FUNCTION_FRONT,
STENCIL_FUNCTION_BACK,
STENCIL_REFERENCE,
BLEND_INDEPENDANT_ENABLE,
BLEND_ENABLE,
BLEND_SOURCE,
BLEND_DESTINATION,
BLEND_OPERATION,
BLEND_SOURCE_ALPHA,
BLEND_DESTINATION_ALPHA,
BLEND_OPERATION_ALPHA,
BLEND_WRITE_MASK,
BLEND_FACTOR,
SAMPLE_MASK,
ALPHA_TO_COVERAGE_ENABLE,
};
*/
protected:
State(const State& state);
State& operator=(const State& state);
// This shouldn't be used by anything else than the Backend class with the proper casting.
mutable GPUObject* _gpuObject = NULL;
void setGPUObject(GPUObject* gpuObject) const { _gpuObject = gpuObject; }
GPUObject* getGPUObject() const { return _gpuObject; }
friend class Backend;
};
typedef QSharedPointer< State > StatePointer;
typedef std::vector< StatePointer > States;
};
#endif
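States is just a vector of QSharedPointer<State>; the Stage.cpp change below passes an empty gpu::States() for now, but a caller could already assemble a (semantically empty) set. A hypothetical snippet, where program is assumed to be an existing gpu::ShaderPointer:

// Hypothetical snippet: State carries no fields yet, so this is purely structural.
gpu::States states;
states.push_back(gpu::StatePointer(new gpu::State()));
auto pipeline = gpu::PipelinePointer(gpu::Pipeline::create(program, states));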

View file

@ -15,8 +15,6 @@
#include "SkyFromAtmosphere_vert.h"
#include "SkyFromAtmosphere_frag.h"
#include "gpu/Context.h"
#include "gpu/GLBackend.h"
using namespace model;
@ -156,12 +154,14 @@ SunSkyStage::SunSkyStage() :
// Beginning of March
setYearTime(60.0f);
_skyShader = gpu::ShaderPointer(
auto skyShader = gpu::ShaderPointer(
gpu::Shader::createProgram(
gpu::ShaderPointer(gpu::Shader::createVertex(std::string(SkyFromAtmosphere_vert))),
gpu::ShaderPointer(gpu::Shader::createPixel(std::string(SkyFromAtmosphere_frag)))
)
);
_skyPipeline = gpu::PipelinePointer(gpu::Pipeline::create(skyShader, gpu::States()));
}
SunSkyStage::~SunSkyStage() {
@ -217,12 +217,11 @@ void SunSkyStage::updateGraphicsObject() const {
double originAlt = _earthSunModel.getAltitude();
_sunLight->setPosition(Vec3(0.0f, originAlt, 0.0f));
GLuint program = gpu::GLBackend::getShaderID(_skyShader);
static int firstTime = 0;
if (firstTime == 0) {
firstTime++;
gpu::Shader::makeProgram(*_skyShader);
bool result = gpu::Shader::makeProgram(*(_skyPipeline->getProgram()));
}
}

View file

@ -11,7 +11,7 @@
#ifndef hifi_model_Stage_h
#define hifi_model_Stage_h
#include "gpu/Shader.h"
#include "gpu/Pipeline.h"
#include "Light.h"
@ -145,7 +145,7 @@ public:
protected:
LightPointer _sunLight;
gpu::ShaderPointer _skyShader;
gpu::PipelinePointer _skyPipeline;
float _dayTime;
int _yearTime;

View file

@ -299,7 +299,7 @@ void Model::initJointTransforms() {
void Model::init() {
if (!_program.isLinked()) {
/*
/* // Work in progress, not used yet
gpu::Shader::BindingSet slotBindings;
slotBindings.insert(gpu::Shader::Binding(std::string("materialBuffer"), 1));
slotBindings.insert(gpu::Shader::Binding(std::string("diffuseMap"), 0));