Updating GPU for 64 bit

Brad Davis 2015-12-02 16:47:48 -08:00
parent 744da64c50
commit 7a05a664f4
14 changed files with 121 additions and 122 deletions
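
One pattern runs through the whole diff: byte sizes and offsets that were typed as uint32 become size_t, and every GLBackend::do_* command handler now takes its parameter offset as size_t. A minimal illustration of the narrowing this removes (standalone sketch with invented function names, not code from the commit):

    #include <cstdint>
    #include <vector>

    // Before: a size_t coming from std::vector::size() was silently
    // truncated to 32 bits, which wraps once the cache passes 4 GB.
    uint32_t cacheOffset32(const std::vector<unsigned char>& data) {
        return (uint32_t)data.size();
    }

    // After: the offset keeps the container's native 64-bit width.
    size_t cacheOffset64(const std::vector<unsigned char>& data) {
        return data.size();
    }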

View file

@ -80,9 +80,9 @@ void Batch::clear() {
_framebuffers.clear();
}
uint32 Batch::cacheData(uint32 size, const void* data) {
uint32 offset = _data.size();
uint32 numBytes = size;
size_t Batch::cacheData(size_t size, const void* data) {
size_t offset = _data.size();
size_t numBytes = size;
_data.resize(offset + numBytes);
memcpy(_data.data() + offset, data, size);

View file

@ -334,7 +334,7 @@ public:
NUM_COMMANDS,
};
typedef std::vector<Command> Commands;
typedef std::vector<uint32> CommandOffsets;
typedef std::vector<size_t> CommandOffsets;
const Commands& getCommands() const { return _commands; }
const CommandOffsets& getCommandOffsets() const { return _commandOffsets; }
@ -342,11 +342,13 @@ public:
class Param {
public:
union {
size_t _size;
int32 _int;
uint32 _uint;
float _float;
char _chars[4];
float _float;
char _chars[sizeof(size_t)];
};
Param(size_t val) : _size(val) {}
Param(int32 val) : _int(val) {}
Param(uint32 val) : _uint(val) {}
Param(float val) : _float(val) {}
@ -370,8 +372,8 @@ public:
std::vector< Cache<T> > _items;
size_t size() const { return _items.size(); }
uint32 cache(const Data& data) {
uint32 offset = _items.size();
size_t cache(const Data& data) {
size_t offset = _items.size();
_items.push_back(Cache<T>(data));
return offset;
}
@ -403,8 +405,8 @@ public:
// FOr example Mat4s are going there
typedef unsigned char Byte;
typedef std::vector<Byte> Bytes;
uint32 cacheData(uint32 size, const void* data);
Byte* editData(uint32 offset) {
size_t cacheData(size_t size, const void* data);
Byte* editData(size_t offset) {
if (offset >= _data.size()) {
return 0;
}
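
In Batch.h the Param union gains a size_t member, its raw-byte view is sized from sizeof(size_t), and the cache helpers (cache, cacheData, editData) now traffic in size_t offsets. A standalone sketch of the widened parameter cell, with illustrative names rather than the full gpu::Batch::Param class:

    #include <cstddef>
    #include <cstdint>

    union ParamCell {
        size_t   _size;                   // new 64-bit member
        int32_t  _int;
        uint32_t _uint;
        float    _float;
        char     _chars[sizeof(size_t)];  // sized to match the widest member
    };

    // On a 64-bit build every parameter cell is one machine word; the
    // existing 32-bit members simply share that storage.
    static_assert(sizeof(ParamCell) == sizeof(size_t),
                  "Param occupies exactly one size_t");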

View file

@ -33,7 +33,7 @@ typedef char int8;
typedef unsigned char Byte;
typedef uint32 Offset;
typedef size_t Offset;
typedef glm::mat4 Mat4;
typedef glm::mat3 Mat3;
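
The Offset typedef moving from uint32 to size_t is the core of the change: offsets taken from standard containers or from sizeof now fit without narrowing. A trivial sketch of that invariant (not from the repo):

    #include <cstddef>
    #include <vector>

    typedef size_t Offset;

    static_assert(sizeof(Offset) == sizeof(std::vector<char>::size_type),
                  "Offset can represent any byte offset a std::vector can report");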

View file

@ -304,7 +304,7 @@ void GLBackend::syncCache() {
glEnable(GL_LINE_SMOOTH);
}
void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
void GLBackend::do_draw(Batch& batch, size_t paramOffset) {
updateInput();
updateTransform();
updatePipeline();
@ -317,7 +317,7 @@ void GLBackend::do_draw(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
void GLBackend::do_drawIndexed(Batch& batch, size_t paramOffset) {
updateInput();
updateTransform();
updatePipeline();
@ -336,7 +336,7 @@ void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_drawInstanced(Batch& batch, uint32 paramOffset) {
void GLBackend::do_drawInstanced(Batch& batch, size_t paramOffset) {
updateInput();
updateTransform();
updatePipeline();
@ -351,7 +351,7 @@ void GLBackend::do_drawInstanced(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
void GLBackend::do_drawIndexedInstanced(Batch& batch, size_t paramOffset) {
updateInput();
updateTransform();
updatePipeline();
@ -378,7 +378,7 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
}
void GLBackend::do_multiDrawIndirect(Batch& batch, uint32 paramOffset) {
void GLBackend::do_multiDrawIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
updateInput();
updateTransform();
@ -387,7 +387,7 @@ void GLBackend::do_multiDrawIndirect(Batch& batch, uint32 paramOffset) {
uint commandCount = batch._params[paramOffset + 0]._uint;
GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 1]._uint];
glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, _input._indirectBufferStride);
glMultiDrawArraysIndirect(mode, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
#else
// FIXME implement the slow path
#endif
@ -395,7 +395,7 @@ void GLBackend::do_multiDrawIndirect(Batch& batch, uint32 paramOffset) {
}
void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, uint32 paramOffset) {
void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset) {
#if (GPU_INPUT_PROFILE == GPU_CORE_43)
updateInput();
updateTransform();
@ -405,7 +405,7 @@ void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, uint32 paramOffset) {
GLenum mode = _primitiveToGLmode[(Primitive)batch._params[paramOffset + 1]._uint];
GLenum indexType = _elementTypeToGLType[_input._indexBufferType];
glMultiDrawElementsIndirect(mode, indexType, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, _input._indirectBufferStride);
glMultiDrawElementsIndirect(mode, indexType, reinterpret_cast<GLvoid*>(_input._indirectBufferOffset), commandCount, (GLsizei)_input._indirectBufferStride);
#else
// FIXME implement the slow path
#endif
@ -413,11 +413,11 @@ void GLBackend::do_multiDrawIndexedIndirect(Batch& batch, uint32 paramOffset) {
}
void GLBackend::do_resetStages(Batch& batch, uint32 paramOffset) {
void GLBackend::do_resetStages(Batch& batch, size_t paramOffset) {
resetStages();
}
void GLBackend::do_runLambda(Batch& batch, uint32 paramOffset) {
void GLBackend::do_runLambda(Batch& batch, size_t paramOffset) {
std::function<void()> f = batch._lambdas.get(batch._params[paramOffset]._uint);
f();
}
@ -455,7 +455,7 @@ void Batch::_glActiveBindTexture(GLenum unit, GLenum target, GLuint texture) {
DO_IT_NOW(_glActiveBindTexture, 3);
}
void GLBackend::do_glActiveBindTexture(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glActiveBindTexture(Batch& batch, size_t paramOffset) {
glActiveTexture(batch._params[paramOffset + 2]._uint);
glBindTexture(
batch._params[paramOffset + 1]._uint,
@ -474,7 +474,7 @@ void Batch::_glUniform1i(GLint location, GLint v0) {
DO_IT_NOW(_glUniform1i, 1);
}
void GLBackend::do_glUniform1i(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform1i(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -497,7 +497,7 @@ void Batch::_glUniform1f(GLint location, GLfloat v0) {
DO_IT_NOW(_glUniform1f, 1);
}
void GLBackend::do_glUniform1f(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform1f(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -521,7 +521,7 @@ void Batch::_glUniform2f(GLint location, GLfloat v0, GLfloat v1) {
DO_IT_NOW(_glUniform2f, 1);
}
void GLBackend::do_glUniform2f(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform2f(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -546,7 +546,7 @@ void Batch::_glUniform3f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2) {
DO_IT_NOW(_glUniform3f, 1);
}
void GLBackend::do_glUniform3f(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform3f(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -575,7 +575,7 @@ void Batch::_glUniform4f(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLf
}
void GLBackend::do_glUniform4f(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform4f(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -601,7 +601,7 @@ void Batch::_glUniform3fv(GLint location, GLsizei count, const GLfloat* value) {
DO_IT_NOW(_glUniform3fv, 3);
}
void GLBackend::do_glUniform3fv(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform3fv(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -627,7 +627,7 @@ void Batch::_glUniform4fv(GLint location, GLsizei count, const GLfloat* value) {
DO_IT_NOW(_glUniform4fv, 3);
}
void GLBackend::do_glUniform4fv(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform4fv(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -653,7 +653,7 @@ void Batch::_glUniform4iv(GLint location, GLsizei count, const GLint* value) {
DO_IT_NOW(_glUniform4iv, 3);
}
void GLBackend::do_glUniform4iv(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniform4iv(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -679,7 +679,7 @@ void Batch::_glUniformMatrix4fv(GLint location, GLsizei count, GLboolean transpo
DO_IT_NOW(_glUniformMatrix4fv, 4);
}
void GLBackend::do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glUniformMatrix4fv(Batch& batch, size_t paramOffset) {
if (_pipeline._program == 0) {
// We should call updatePipeline() to bind the program but we are not doing that
// because these uniform setters are deprecated and we don;t want to create side effect
@ -704,7 +704,7 @@ void Batch::_glColor4f(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)
DO_IT_NOW(_glColor4f, 4);
}
void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
void GLBackend::do_glColor4f(Batch& batch, size_t paramOffset) {
glm::vec4 newColor(
batch._params[paramOffset + 3]._float,
@ -720,14 +720,14 @@ void GLBackend::do_glColor4f(Batch& batch, uint32 paramOffset) {
}
void GLBackend::do_pushProfileRange(Batch& batch, uint32 paramOffset) {
void GLBackend::do_pushProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
auto name = batch._profileRanges.get(batch._params[paramOffset]._uint);
nvtxRangePush(name.c_str());
#endif
}
void GLBackend::do_popProfileRange(Batch& batch, uint32 paramOffset) {
void GLBackend::do_popProfileRange(Batch& batch, size_t paramOffset) {
#if defined(NSIGHT_FOUND)
nvtxRangePop();
#endif
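
The handlers above now receive the parameter offset as size_t, but the GL entry points they call (glMultiDrawArraysIndirect, glMultiDrawElementsIndirect) still take 32-bit GLsizei counts and strides, which is why the explicit (GLsizei) casts appear. A checked-narrowing sketch of the same idea; the helper name is hypothetical, not something the repo defines:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    typedef int GLsizei;   // stand-in for the real GL typedef in this sketch

    inline GLsizei toGLsizei(size_t value) {
        // make the 64-bit to 32-bit truncation visible instead of silent
        assert(value <= (size_t)std::numeric_limits<GLsizei>::max());
        return (GLsizei)value;
    }

    // Hypothetical usage at a call site like the ones above:
    //   glMultiDrawArraysIndirect(mode, ptr, toGLsizei(commandCount),
    //                             toGLsizei(_input._indirectBufferStride));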

View file

@ -195,17 +195,17 @@ public:
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
static const int MAX_NUM_INPUT_BUFFERS = 16;
uint32 getNumInputBuffers() const { return _input._invalidBuffers.size(); }
size_t getNumInputBuffers() const { return _input._invalidBuffers.size(); }
// this is the maximum per shader stage on the low end apple
// TODO make it platform dependant at init time
static const int MAX_NUM_UNIFORM_BUFFERS = 12;
uint32 getMaxNumUniformBuffers() const { return MAX_NUM_UNIFORM_BUFFERS; }
size_t getMaxNumUniformBuffers() const { return MAX_NUM_UNIFORM_BUFFERS; }
// this is the maximum per shader stage on the low end apple
// TODO make it platform dependant at init time
static const int MAX_NUM_RESOURCE_TEXTURES = 16;
uint32 getMaxNumResourceTextures() const { return MAX_NUM_RESOURCE_TEXTURES; }
size_t getMaxNumResourceTextures() const { return MAX_NUM_RESOURCE_TEXTURES; }
// The State setters called by the GLState::Commands when a new state is assigned
void do_setStateFillMode(int32 mode);
@ -248,18 +248,18 @@ protected:
Stats _stats;
// Draw Stage
void do_draw(Batch& batch, uint32 paramOffset);
void do_drawIndexed(Batch& batch, uint32 paramOffset);
void do_drawInstanced(Batch& batch, uint32 paramOffset);
void do_drawIndexedInstanced(Batch& batch, uint32 paramOffset);
void do_multiDrawIndirect(Batch& batch, uint32 paramOffset);
void do_multiDrawIndexedIndirect(Batch& batch, uint32 paramOffset);
void do_draw(Batch& batch, size_t paramOffset);
void do_drawIndexed(Batch& batch, size_t paramOffset);
void do_drawInstanced(Batch& batch, size_t paramOffset);
void do_drawIndexedInstanced(Batch& batch, size_t paramOffset);
void do_multiDrawIndirect(Batch& batch, size_t paramOffset);
void do_multiDrawIndexedIndirect(Batch& batch, size_t paramOffset);
// Input Stage
void do_setInputFormat(Batch& batch, uint32 paramOffset);
void do_setInputBuffer(Batch& batch, uint32 paramOffset);
void do_setIndexBuffer(Batch& batch, uint32 paramOffset);
void do_setIndirectBuffer(Batch& batch, uint32 paramOffset);
void do_setInputFormat(Batch& batch, size_t paramOffset);
void do_setInputBuffer(Batch& batch, size_t paramOffset);
void do_setIndexBuffer(Batch& batch, size_t paramOffset);
void do_setIndirectBuffer(Batch& batch, size_t paramOffset);
void initInput();
void killInput();
@ -310,11 +310,11 @@ protected:
} _input;
// Transform Stage
void do_setModelTransform(Batch& batch, uint32 paramOffset);
void do_setViewTransform(Batch& batch, uint32 paramOffset);
void do_setProjectionTransform(Batch& batch, uint32 paramOffset);
void do_setViewportTransform(Batch& batch, uint32 paramOffset);
void do_setDepthRangeTransform(Batch& batch, uint32 paramOffset);
void do_setModelTransform(Batch& batch, size_t paramOffset);
void do_setViewTransform(Batch& batch, size_t paramOffset);
void do_setProjectionTransform(Batch& batch, size_t paramOffset);
void do_setViewportTransform(Batch& batch, size_t paramOffset);
void do_setDepthRangeTransform(Batch& batch, size_t paramOffset);
void initTransform();
void killTransform();
@ -362,7 +362,7 @@ protected:
// Uniform Stage
void do_setUniformBuffer(Batch& batch, uint32 paramOffset);
void do_setUniformBuffer(Batch& batch, size_t paramOffset);
void releaseUniformBuffer(uint32_t slot);
void resetUniformStage();
@ -375,7 +375,7 @@ protected:
} _uniform;
// Resource Stage
void do_setResourceTexture(Batch& batch, uint32 paramOffset);
void do_setResourceTexture(Batch& batch, size_t paramOffset);
void releaseResourceTexture(uint32_t slot);
void resetResourceStage();
@ -390,9 +390,9 @@ protected:
size_t _commandIndex{ 0 };
// Pipeline Stage
void do_setPipeline(Batch& batch, uint32 paramOffset);
void do_setStateBlendFactor(Batch& batch, uint32 paramOffset);
void do_setStateScissorRect(Batch& batch, uint32 paramOffset);
void do_setPipeline(Batch& batch, size_t paramOffset);
void do_setStateBlendFactor(Batch& batch, size_t paramOffset);
void do_setStateScissorRect(Batch& batch, size_t paramOffset);
// Standard update pipeline check that the current Program and current State or good to go for a
void updatePipeline();
@ -429,9 +429,9 @@ protected:
} _pipeline;
// Output stage
void do_setFramebuffer(Batch& batch, uint32 paramOffset);
void do_clearFramebuffer(Batch& batch, uint32 paramOffset);
void do_blit(Batch& batch, uint32 paramOffset);
void do_setFramebuffer(Batch& batch, size_t paramOffset);
void do_clearFramebuffer(Batch& batch, size_t paramOffset);
void do_blit(Batch& batch, size_t paramOffset);
// Synchronize the state cache of this Backend with the actual real state of the GL Context
void syncOutputStateCache();
@ -446,9 +446,9 @@ protected:
} _output;
// Query section
void do_beginQuery(Batch& batch, uint32 paramOffset);
void do_endQuery(Batch& batch, uint32 paramOffset);
void do_getQuery(Batch& batch, uint32 paramOffset);
void do_beginQuery(Batch& batch, size_t paramOffset);
void do_endQuery(Batch& batch, size_t paramOffset);
void do_getQuery(Batch& batch, size_t paramOffset);
void resetQueryStage();
struct QueryStageState {
@ -456,33 +456,33 @@ protected:
};
// Reset stages
void do_resetStages(Batch& batch, uint32 paramOffset);
void do_resetStages(Batch& batch, size_t paramOffset);
void do_runLambda(Batch& batch, uint32 paramOffset);
void do_runLambda(Batch& batch, size_t paramOffset);
void resetStages();
// TODO: As long as we have gl calls explicitely issued from interface
// code, we need to be able to record and batch these calls. THe long
// term strategy is to get rid of any GL calls in favor of the HIFI GPU API
void do_glActiveBindTexture(Batch& batch, uint32 paramOffset);
void do_glActiveBindTexture(Batch& batch, size_t paramOffset);
void do_glUniform1i(Batch& batch, uint32 paramOffset);
void do_glUniform1f(Batch& batch, uint32 paramOffset);
void do_glUniform2f(Batch& batch, uint32 paramOffset);
void do_glUniform3f(Batch& batch, uint32 paramOffset);
void do_glUniform4f(Batch& batch, uint32 paramOffset);
void do_glUniform3fv(Batch& batch, uint32 paramOffset);
void do_glUniform4fv(Batch& batch, uint32 paramOffset);
void do_glUniform4iv(Batch& batch, uint32 paramOffset);
void do_glUniformMatrix4fv(Batch& batch, uint32 paramOffset);
void do_glUniform1i(Batch& batch, size_t paramOffset);
void do_glUniform1f(Batch& batch, size_t paramOffset);
void do_glUniform2f(Batch& batch, size_t paramOffset);
void do_glUniform3f(Batch& batch, size_t paramOffset);
void do_glUniform4f(Batch& batch, size_t paramOffset);
void do_glUniform3fv(Batch& batch, size_t paramOffset);
void do_glUniform4fv(Batch& batch, size_t paramOffset);
void do_glUniform4iv(Batch& batch, size_t paramOffset);
void do_glUniformMatrix4fv(Batch& batch, size_t paramOffset);
void do_glColor4f(Batch& batch, uint32 paramOffset);
void do_glColor4f(Batch& batch, size_t paramOffset);
void do_pushProfileRange(Batch& batch, uint32 paramOffset);
void do_popProfileRange(Batch& batch, uint32 paramOffset);
void do_pushProfileRange(Batch& batch, size_t paramOffset);
void do_popProfileRange(Batch& batch, size_t paramOffset);
typedef void (GLBackend::*CommandCall)(Batch&, uint32);
typedef void (GLBackend::*CommandCall)(Batch&, size_t);
static CommandCall _commandCalls[Batch::NUM_COMMANDS];
};
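
Because command playback goes through the _commandCalls table of member-function pointers, every do_* handler has to switch to the new size_t signature in the same commit or the table initializer stops compiling. A minimal sketch of that dispatch pattern, with simplified names:

    #include <cstddef>

    class Batch;   // forward declaration is enough for the sketch

    class Backend {
    public:
        typedef void (Backend::*CommandCall)(Batch&, size_t);

        void run(Batch& batch, int command, size_t paramOffset) {
            (this->*_commandCalls[command])(batch, paramOffset);
        }

    private:
        // Each handler must match CommandCall exactly, which is why the
        // whole do_* family changes type together.
        void do_draw(Batch&, size_t) { /* ... */ }
        void do_drawIndexed(Batch&, size_t) { /* ... */ }

        static CommandCall _commandCalls[2];
    };

    Backend::CommandCall Backend::_commandCalls[2] = {
        &Backend::do_draw,
        &Backend::do_drawIndexed,
    };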

View file

@ -46,7 +46,7 @@ GLBackend::GLBuffer* GLBackend::syncGPUObject(const Buffer& buffer) {
glBufferData(GL_ARRAY_BUFFER, buffer.getSysmem().getSize(), buffer.getSysmem().readData(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
object->_stamp = buffer.getSysmem().getStamp();
object->_size = buffer.getSysmem().getSize();
object->_size = (GLuint)buffer.getSysmem().getSize();
//}
(void) CHECK_GL_ERROR();

View file

@ -12,7 +12,7 @@
using namespace gpu;
void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setInputFormat(Batch& batch, size_t paramOffset) {
Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);
if (format != _input._format) {
@ -21,7 +21,7 @@ void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
}
}
void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setInputBuffer(Batch& batch, size_t paramOffset) {
Offset stride = batch._params[paramOffset + 0]._uint;
Offset offset = batch._params[paramOffset + 1]._uint;
BufferPointer buffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
@ -232,14 +232,14 @@ void GLBackend::updateInput() {
GLenum type = _elementTypeToGLType[attrib._element.getType()];
// GLenum perLocationStride = strides[bufferNum];
GLenum perLocationStride = attrib._element.getLocationSize();
GLuint stride = strides[bufferNum];
GLuint pointer = attrib._offset + offsets[bufferNum];
GLuint stride = (GLuint)strides[bufferNum];
GLuint pointer = (GLuint)(attrib._offset + offsets[bufferNum]);
GLboolean isNormalized = attrib._element.isNormalized();
for (size_t locNum = 0; locNum < locationCount; ++locNum) {
glVertexAttribPointer(slot + locNum, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer + perLocationStride * locNum));
glVertexAttribDivisor(slot + locNum, attrib._frequency);
glVertexAttribPointer(slot + (GLuint)locNum, count, type, isNormalized, stride,
reinterpret_cast<GLvoid*>(pointer + perLocationStride * (GLuint)locNum));
glVertexAttribDivisor(slot + (GLuint)locNum, attrib._frequency);
}
// TODO: Support properly the IAttrib version
@ -287,7 +287,7 @@ void GLBackend::resetInputStage() {
}
void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setIndexBuffer(Batch& batch, size_t paramOffset) {
_input._indexBufferType = (Type)batch._params[paramOffset + 2]._uint;
_input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
@ -304,7 +304,7 @@ void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_setIndirectBuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setIndirectBuffer(Batch& batch, size_t paramOffset) {
_input._indirectBufferOffset = batch._params[paramOffset + 1]._uint;
_input._indirectBufferStride = batch._params[paramOffset + 2]._uint;
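
In the input stage the attribute stride and offset are narrowed to GLuint before being handed to glVertexAttribPointer, where (with a bound VBO) the "pointer" argument is really a byte offset passed through a pointer parameter for historical reasons. A sketch of that idiom with stand-in GL typedefs, not code from the repo:

    #include <cstddef>
    #include <cstdint>

    typedef unsigned int GLuint;   // stand-ins for the real GL typedefs
    typedef void GLvoid;

    inline const GLvoid* bufferOffset(size_t offset) {
        // mirror the explicit (GLuint) narrowing in the diff, then widen
        // back to a pointer-sized integer for the reinterpret_cast
        GLuint narrowed = (GLuint)offset;
        return reinterpret_cast<const GLvoid*>((std::uintptr_t)narrowed);
    }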

View file

@ -196,7 +196,7 @@ void GLBackend::resetOutputStage() {
}
}
void GLBackend::do_setFramebuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setFramebuffer(Batch& batch, size_t paramOffset) {
auto framebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
if (_output._framebuffer != framebuffer) {
auto newFBO = getFramebufferID(framebuffer);
@ -208,7 +208,7 @@ void GLBackend::do_setFramebuffer(Batch& batch, uint32 paramOffset) {
}
}
void GLBackend::do_clearFramebuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_clearFramebuffer(Batch& batch, size_t paramOffset) {
if (_stereo._enable && !_pipeline._stateCache.scissorEnable) {
qWarning("Clear without scissor in stereo mode");
}
@ -298,7 +298,7 @@ void GLBackend::do_clearFramebuffer(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_blit(Batch& batch, uint32 paramOffset) {
void GLBackend::do_blit(Batch& batch, size_t paramOffset) {
auto srcframebuffer = batch._framebuffers.get(batch._params[paramOffset]._uint);
Vec4i srcvp;
for (size_t i = 0; i < 4; ++i) {

View file

@ -57,7 +57,7 @@ GLBackend::GLPipeline* GLBackend::syncGPUObject(const Pipeline& pipeline) {
return object;
}
void GLBackend::do_setPipeline(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setPipeline(Batch& batch, size_t paramOffset) {
PipelinePointer pipeline = batch._pipelines.get(batch._params[paramOffset + 0]._uint);
if (_pipeline._pipeline == pipeline) {
@ -168,7 +168,7 @@ void GLBackend::resetUniformStage() {
}
}
void GLBackend::do_setUniformBuffer(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setUniformBuffer(Batch& batch, size_t paramOffset) {
GLuint slot = batch._params[paramOffset + 3]._uint;
BufferPointer uniformBuffer = batch._buffers.get(batch._params[paramOffset + 2]._uint);
GLintptr rangeStart = batch._params[paramOffset + 1]._uint;
@ -237,7 +237,7 @@ void GLBackend::resetResourceStage() {
}
}
void GLBackend::do_setResourceTexture(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setResourceTexture(Batch& batch, size_t paramOffset) {
GLuint slot = batch._params[paramOffset + 1]._uint;
TexturePointer resourceTexture = batch._textures.get(batch._params[paramOffset + 0]._uint);

View file

@ -60,7 +60,7 @@ GLuint GLBackend::getQueryID(const QueryPointer& query) {
}
}
void GLBackend::do_beginQuery(Batch& batch, uint32 paramOffset) {
void GLBackend::do_beginQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {
@ -74,7 +74,7 @@ void GLBackend::do_beginQuery(Batch& batch, uint32 paramOffset) {
}
}
void GLBackend::do_endQuery(Batch& batch, uint32 paramOffset) {
void GLBackend::do_endQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {
@ -88,7 +88,7 @@ void GLBackend::do_endQuery(Batch& batch, uint32 paramOffset) {
}
}
void GLBackend::do_getQuery(Batch& batch, uint32 paramOffset) {
void GLBackend::do_getQuery(Batch& batch, size_t paramOffset) {
auto query = batch._queries.get(batch._params[paramOffset]._uint);
GLQuery* glquery = syncGPUObject(*query);
if (glquery) {

View file

@ -763,7 +763,7 @@ void GLBackend::do_setStateColorWriteMask(uint32 mask) {
}
void GLBackend::do_setStateBlendFactor(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setStateBlendFactor(Batch& batch, size_t paramOffset) {
Vec4 factor(batch._params[paramOffset + 0]._float,
batch._params[paramOffset + 1]._float,
@ -774,7 +774,7 @@ void GLBackend::do_setStateBlendFactor(Batch& batch, uint32 paramOffset) {
(void) CHECK_GL_ERROR();
}
void GLBackend::do_setStateScissorRect(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setStateScissorRect(Batch& batch, size_t paramOffset) {
Vec4i rect;
memcpy(&rect, batch.editData(batch._params[paramOffset]._uint), sizeof(Vec4i));

View file

@ -15,22 +15,22 @@
using namespace gpu;
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setModelTransform(Batch& batch, size_t paramOffset) {
_transform._model = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidModel = true;
}
void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setViewTransform(Batch& batch, size_t paramOffset) {
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidView = true;
}
void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setProjectionTransform(Batch& batch, size_t paramOffset) {
memcpy(&_transform._projection, batch.editData(batch._params[paramOffset]._uint), sizeof(Mat4));
_transform._invalidProj = true;
}
void GLBackend::do_setViewportTransform(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setViewportTransform(Batch& batch, size_t paramOffset) {
memcpy(&_transform._viewport, batch.editData(batch._params[paramOffset]._uint), sizeof(Vec4i));
ivec4& vp = _transform._viewport;
@ -49,7 +49,7 @@ void GLBackend::do_setViewportTransform(Batch& batch, uint32 paramOffset) {
_transform._invalidViewport = true;
}
void GLBackend::do_setDepthRangeTransform(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setDepthRangeTransform(Batch& batch, size_t paramOffset) {
Vec2 depthRange(batch._params[paramOffset + 0]._float, batch._params[paramOffset + 1]._float);
@ -142,7 +142,8 @@ void GLBackend::TransformStageState::preUpdate(size_t commandIndex, const Stereo
}
void GLBackend::TransformStageState::transfer() const {
static QByteArray bufferData;
// FIXME not thread safe
static std::vector<uint8_t> bufferData;
if (!_cameras.empty()) {
glBindBuffer(GL_UNIFORM_BUFFER, _cameraBuffer);
bufferData.resize(_cameraUboSize * _cameras.size());
@ -168,22 +169,23 @@ void GLBackend::TransformStageState::transfer() const {
}
void GLBackend::TransformStageState::update(size_t commandIndex, const StereoState& stereo) const {
int offset = -1;
static const size_t INVALID_OFFSET = (size_t)-1;
size_t offset = INVALID_OFFSET;
while ((_objectsItr != _objectOffsets.end()) && (commandIndex >= (*_objectsItr).first)) {
offset = (*_objectsItr).second;
++_objectsItr;
}
if (offset >= 0) {
if (offset != INVALID_OFFSET) {
glBindBufferRange(GL_UNIFORM_BUFFER, TRANSFORM_OBJECT_SLOT,
_objectBuffer, offset, sizeof(Backend::TransformObject));
}
offset = -1;
offset = INVALID_OFFSET;
while ((_camerasItr != _cameraOffsets.end()) && (commandIndex >= (*_camerasItr).first)) {
offset = (*_camerasItr).second;
++_camerasItr;
}
if (offset >= 0) {
if (offset != INVALID_OFFSET) {
// We include both camera offsets for stereo
if (stereo._enable && stereo._pass) {
offset += _cameraUboSize;
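
The transform stage shows a behavioural trap in this migration: the old code used int offset = -1 with an if (offset >= 0) test, which is always true once offset is an unsigned size_t, so the commit introduces an explicit INVALID_OFFSET sentinel instead. A minimal sketch of that pattern (the helper is hypothetical):

    #include <cstddef>

    static const size_t INVALID_OFFSET = (size_t)-1;   // i.e. SIZE_MAX

    inline bool haveOffset(size_t offset) {
        // "offset >= 0" can no longer distinguish "not set";
        // compare against the sentinel instead
        return offset != INVALID_OFFSET;
    }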

View file

@ -26,9 +26,9 @@ namespace gpu {
class Resource {
public:
typedef unsigned int Size;
typedef size_t Size;
static const Size NOT_ALLOCATED = -1;
static const Size NOT_ALLOCATED = (Size)-1;
// The size in bytes of data stored in the resource
virtual Size getSize() const = 0;
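
With Resource::Size now a size_t, writing the sentinel as (Size)-1 makes the intent explicit: converting -1 to an unsigned type is well defined and yields the maximum value, and the cast keeps sign-conversion warnings quiet. A small standalone check of that equivalence, not from the commit:

    #include <cstddef>
    #include <cstdint>

    typedef size_t Size;
    static const Size NOT_ALLOCATED = (Size)-1;

    static_assert(NOT_ALLOCATED == SIZE_MAX,
                  "the sentinel is the largest representable Size");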

View file

@ -93,19 +93,14 @@ public:
};
typedef std::map< Slot, ChannelInfo > ChannelMap;
Format() :
_attributes(),
_elementTotalSize(0) {}
~Format() {}
uint32 getNumAttributes() const { return _attributes.size(); }
size_t getNumAttributes() const { return _attributes.size(); }
const AttributeMap& getAttributes() const { return _attributes; }
uint8 getNumChannels() const { return _channels.size(); }
size_t getNumChannels() const { return _channels.size(); }
const ChannelMap& getChannels() const { return _channels; }
Offset getChannelStride(Slot channel) const { return _channels.at(channel)._stride; }
uint32 getElementTotalSize() const { return _elementTotalSize; }
size_t getElementTotalSize() const { return _elementTotalSize; }
bool setAttribute(Slot slot, Slot channel, Element element, Offset offset = 0, Frequency frequency = PER_VERTEX);
bool setAttribute(Slot slot, Frequency frequency = PER_VERTEX);
@ -115,7 +110,7 @@ public:
protected:
AttributeMap _attributes;
ChannelMap _channels;
uint32 _elementTotalSize;
uint32 _elementTotalSize { 0 };
void evaluateCache();
};
@ -140,7 +135,7 @@ public:
const Buffers& getBuffers() const { return _buffers; }
const Offsets& getOffsets() const { return _offsets; }
const Strides& getStrides() const { return _strides; }
uint32 getNumBuffers() const { return _buffers.size(); }
size_t getNumBuffers() const { return _buffers.size(); }
BufferStream makeRangedStream(uint32 offset, uint32 count = -1) const;