Merge pull request #3787 from samcake/temp0

Simplified the Transform usage in the gpu::api
AndrewMeadows 2014-11-13 14:10:55 -08:00
commit e509a4c41a
9 changed files with 161 additions and 149 deletions
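
In short, this commit removes the heap-allocated gpu::TransformPointer (a QSharedPointer<gpu::Transform>) from the gpu API and passes Transform around by value instead: the Batch setters now take const Transform& and copy the value into an internal cache. A minimal caller-side sketch of the change, assuming a gpu::Batch being recorded and the Camera/Transform interfaces that appear in the diffs below (the helper functions are hypothetical, not repository code):

// Before this commit: transforms were heap-allocated and shared via QSharedPointer.
void recordViewOld(gpu::Batch& batch, const Camera& whichCamera) {
    gpu::TransformPointer view(new gpu::Transform());
    view->setTranslation(whichCamera.getPosition());
    batch.setViewTransform(view);            // took const gpu::TransformPointer&
}

// After this commit: Transform is a small value type copied into the batch's cache.
void recordViewNew(gpu::Batch& batch, const Camera& whichCamera) {
    Transform view;
    view.setTranslation(whichCamera.getPosition());
    batch.setViewTransform(view);            // now takes const Transform&
}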


@@ -160,7 +160,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer &startup_time) :
_lastQueriedViewFrustum(),
_lastQueriedTime(usecTimestampNow()),
_mirrorViewRect(QRect(MIRROR_VIEW_LEFT_PADDING, MIRROR_VIEW_TOP_PADDING, MIRROR_VIEW_WIDTH, MIRROR_VIEW_HEIGHT)),
_viewTransform(new gpu::Transform()),
_viewTransform(),
_scaleMirror(1.0f),
_rotateMirror(0.0f),
_raiseMirror(0.0f),
@@ -2911,13 +2911,13 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
// Equivalent to what is happening with _untranslatedViewMatrix and the _viewMatrixTranslation
// the viewTransform object is updated with the correct values and saved,
// this is what is used for rendering the Entities and avatars
gpu::Transform viewTransform;
Transform viewTransform;
viewTransform.setTranslation(whichCamera.getPosition());
viewTransform.setRotation(rotation);
viewTransform.postTranslate(eyeOffsetPos);
viewTransform.postRotate(eyeOffsetOrient);
if (whichCamera.getMode() == CAMERA_MODE_MIRROR) {
viewTransform.setScale(gpu::Transform::Vec3(-1.0f, 1.0f, 1.0f));
viewTransform.setScale(Transform::Vec3(-1.0f, 1.0f, 1.0f));
}
setViewTransform(viewTransform);
@@ -3117,8 +3117,8 @@ void Application::updateUntranslatedViewMatrix(const glm::vec3& viewMatrixTransl
_viewMatrixTranslation = viewMatrixTranslation;
}
void Application::setViewTransform(const gpu::Transform& view) {
(*_viewTransform) = view;
void Application::setViewTransform(const Transform& view) {
_viewTransform = view;
}
void Application::loadTranslatedViewMatrix(const glm::vec3& translation) {


@@ -232,8 +232,8 @@ public:
const glm::vec3& getViewMatrixTranslation() const { return _viewMatrixTranslation; }
void setViewMatrixTranslation(const glm::vec3& translation) { _viewMatrixTranslation = translation; }
const gpu::TransformPointer& getViewTransform() const { return _viewTransform; }
void setViewTransform(const gpu::Transform& view);
const Transform& getViewTransform() const { return _viewTransform; }
void setViewTransform(const Transform& view);
/// if you need to access the application settings, use lockSettings()/unlockSettings()
QSettings* lockSettings() { _settingsMutex.lock(); return _settings; }
@@ -526,7 +526,7 @@ private:
QRect _mirrorViewRect;
RearMirrorTools* _rearMirrorTools;
gpu::TransformPointer _viewTransform;
Transform _viewTransform;
glm::mat4 _untranslatedViewMatrix;
glm::vec3 _viewMatrixTranslation;
glm::mat4 _projectionMatrix;


@@ -135,19 +135,19 @@ void Batch::setIndexBuffer(Type type, const BufferPointer& buffer, Offset offset
_params.push_back(type);
}
void Batch::setModelTransform(const TransformPointer& model) {
void Batch::setModelTransform(const Transform& model) {
ADD_COMMAND(setModelTransform);
_params.push_back(_transforms.cache(model));
}
void Batch::setViewTransform(const TransformPointer& view) {
void Batch::setViewTransform(const Transform& view) {
ADD_COMMAND(setViewTransform);
_params.push_back(_transforms.cache(view));
}
void Batch::setProjectionTransform(const TransformPointer& proj) {
void Batch::setProjectionTransform(const Transform& proj) {
ADD_COMMAND(setProjectionTransform);
_params.push_back(_transforms.cache(proj));


@@ -50,10 +50,6 @@ enum Primitive {
NUM_PRIMITIVES,
};
typedef ::Transform Transform;
typedef QSharedPointer< ::gpu::Transform > TransformPointer;
typedef std::vector< TransformPointer > Transforms;
class Batch {
public:
typedef Stream::Slot Slot;
@@ -87,9 +83,9 @@ public:
// finally projected into the clip space by the projection transform
// WARNING: the ViewTransform transforms from eye space to world space; its inverse is composed
// with the ModelTransform to create the equivalent of the glModelViewMatrix
void setModelTransform(const TransformPointer& model);
void setViewTransform(const TransformPointer& view);
void setProjectionTransform(const TransformPointer& proj);
void setModelTransform(const Transform& model);
void setViewTransform(const Transform& view);
void setProjectionTransform(const Transform& proj);
// TODO: As long as we have gl calls explicitly issued from interface
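
The WARNING above is what GLBackend::updateTransform (later in this diff) implements via Transform::inverseMult: the view transform maps eye space to world space, so its inverse composed with the model transform reproduces the classic GL_MODELVIEW matrix. A self-contained sketch of that composition in plain glm, for illustration only (the backend operates on Transform objects rather than raw matrices):

#include <glm/glm.hpp>

// modelView = inverse(view) * model: geometry is first placed in world space by
// the model matrix, then brought into eye space by the inverse of the view transform.
glm::mat4 makeModelView(const glm::mat4& view, const glm::mat4& model) {
    return glm::inverse(view) * model;
}
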
@@ -258,35 +254,35 @@ public:
template <typename T>
class Cache {
public:
typedef QSharedPointer<T> Pointer;
Pointer _pointer;
Cache<T>(const Pointer& pointer) : _pointer(pointer) {}
typedef T Data;
Data _data;
Cache<T>(const Data& data) : _data(data) {}
class Vector {
public:
std::vector< Cache<T> > _pointers;
std::vector< Cache<T> > _items;
uint32 cache(const Pointer& pointer) {
uint32 offset = _pointers.size();
_pointers.push_back(Cache<T>(pointer));
uint32 cache(const Data& data) {
uint32 offset = _items.size();
_items.push_back(Cache<T>(data));
return offset;
}
Pointer get(uint32 offset) {
if (offset >= _pointers.size()) {
return Pointer();
Data get(uint32 offset) {
if (offset >= _items.size()) {
return Data();
}
return (_pointers.data() + offset)->_pointer;
return (_items.data() + offset)->_data;
}
void clear() {
_pointers.clear();
_items.clear();
}
};
};
typedef Cache<Buffer>::Vector BufferCaches;
typedef Cache<Stream::Format>::Vector StreamFormatCaches;
typedef Cache<BufferPointer>::Vector BufferCaches;
typedef Cache<Stream::FormatPointer>::Vector StreamFormatCaches;
typedef Cache<Transform>::Vector TransformCaches;
typedef unsigned char Byte;
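
The Cache template above now stores copies of the cached objects rather than QSharedPointers: Batch::setModelTransform and friends append a copy and push the returned offset into the command parameter list, and GLBackend later looks the copy up by that offset during playback. A stand-alone sketch of the same pattern (hypothetical names, not the repository's code):

#include <cstdint>
#include <vector>

// Items are copied into a vector; the recorded command stream carries only the
// integer offset handed back by cache().
template <typename T>
struct ValueCache {
    std::vector<T> _items;

    uint32_t cache(const T& data) {        // store a copy, return its index
        _items.push_back(data);
        return static_cast<uint32_t>(_items.size() - 1);
    }
    T get(uint32_t offset) const {         // default-constructed T if out of range
        return offset < _items.size() ? _items[offset] : T();
    }
    void clear() { _items.clear(); }
};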


@@ -113,15 +113,7 @@ static const GLenum _elementTypeToGLType[NUM_TYPES]= {
GLBackend::GLBackend() :
_needInputFormatUpdate(true),
_inputFormat(0),
_inputBuffersState(0),
_inputBuffers(_inputBuffersState.size(), BufferPointer(0)),
_inputBufferOffsets(_inputBuffersState.size(), 0),
_inputBufferStrides(_inputBuffersState.size(), 0),
_indexBuffer(0),
_indexBufferOffset(0),
_inputAttributeActivation(0),
_input(),
_transform()
{
@@ -203,9 +195,9 @@ void GLBackend::do_drawIndexed(Batch& batch, uint32 paramOffset) {
uint32 numIndices = batch._params[paramOffset + 1]._uint;
uint32 startIndex = batch._params[paramOffset + 0]._uint;
GLenum glType = _elementTypeToGLType[_indexBufferType];
GLenum glType = _elementTypeToGLType[_input._indexBufferType];
glDrawElements(mode, numIndices, glType, reinterpret_cast<GLvoid*>(startIndex + _indexBufferOffset));
glDrawElements(mode, numIndices, glType, reinterpret_cast<GLvoid*>(startIndex + _input._indexBufferOffset));
CHECK_GL_ERROR();
}
@@ -220,9 +212,9 @@ void GLBackend::do_drawIndexedInstanced(Batch& batch, uint32 paramOffset) {
void GLBackend::do_setInputFormat(Batch& batch, uint32 paramOffset) {
Stream::FormatPointer format = batch._streamFormats.get(batch._params[paramOffset]._uint);
if (format != _inputFormat) {
_inputFormat = format;
_needInputFormatUpdate = true;
if (format != _input._format) {
_input._format = format;
_input._invalidFormat = true;
}
}
@@ -233,10 +225,10 @@ void GLBackend::do_setInputBuffer(Batch& batch, uint32 paramOffset) {
uint32 channel = batch._params[paramOffset + 3]._uint;
if (channel < getNumInputBuffers()) {
_inputBuffers[channel] = buffer;
_inputBufferOffsets[channel] = offset;
_inputBufferStrides[channel] = stride;
_inputBuffersState.set(channel);
_input._buffers[channel] = buffer;
_input._bufferOffsets[channel] = offset;
_input._bufferStrides[channel] = stride;
_input._buffersState.set(channel);
}
}
@@ -252,14 +244,14 @@ static const GLenum attributeSlotToClassicAttribName[NUM_CLASSIC_ATTRIBS] = {
#endif
void GLBackend::updateInput() {
if (_needInputFormatUpdate || _inputBuffersState.any()) {
if (_input._invalidFormat || _input._buffersState.any()) {
if (_needInputFormatUpdate) {
InputActivationCache newActivation;
if (_input._invalidFormat) {
InputStageState::ActivationCache newActivation;
// Check expected activation
if (_inputFormat) {
const Stream::Format::AttributeMap& attributes = _inputFormat->getAttributes();
if (_input._format) {
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::AttributeMap::const_iterator it = attributes.begin(); it != attributes.end(); it++) {
const Stream::Attribute& attrib = (*it).second;
newActivation.set(attrib._slot);
@@ -269,7 +261,7 @@ void GLBackend::updateInput() {
// Manage the attribute activation: what was active before and what is expected now
for (unsigned int i = 0; i < newActivation.size(); i++) {
bool newState = newActivation[i];
if (newState != _inputAttributeActivation[i]) {
if (newState != _input._attributeActivation[i]) {
#if defined(SUPPORT_LEGACY_OPENGL)
if (i < NUM_CLASSIC_ATTRIBS) {
if (newState) {
@@ -290,31 +282,31 @@ void GLBackend::updateInput() {
}
CHECK_GL_ERROR();
_inputAttributeActivation.flip(i);
_input._attributeActivation.flip(i);
}
}
}
// now we need to bind the buffers and assign the attrib pointers
if (_inputFormat) {
const Buffers& buffers = _inputBuffers;
const Offsets& offsets = _inputBufferOffsets;
const Offsets& strides = _inputBufferStrides;
if (_input._format) {
const Buffers& buffers = _input._buffers;
const Offsets& offsets = _input._bufferOffsets;
const Offsets& strides = _input._bufferStrides;
const Stream::Format::AttributeMap& attributes = _inputFormat->getAttributes();
const Stream::Format::AttributeMap& attributes = _input._format->getAttributes();
for (Stream::Format::ChannelMap::const_iterator channelIt = _inputFormat->getChannels().begin();
channelIt != _inputFormat->getChannels().end();
for (Stream::Format::ChannelMap::const_iterator channelIt = _input._format->getChannels().begin();
channelIt != _input._format->getChannels().end();
channelIt++) {
const Stream::Format::ChannelMap::value_type::second_type& channel = (*channelIt).second;
if ((*channelIt).first < buffers.size()) {
int bufferNum = (*channelIt).first;
if (_inputBuffersState.test(bufferNum) || _needInputFormatUpdate) {
if (_input._buffersState.test(bufferNum) || _input._invalidFormat) {
GLuint vbo = gpu::GLBackend::getBufferID((*buffers[bufferNum]));
glBindBuffer(GL_ARRAY_BUFFER, vbo);
CHECK_GL_ERROR();
_inputBuffersState[bufferNum] = false;
_input._buffersState[bufferNum] = false;
for (unsigned int i = 0; i < channel._slots.size(); i++) {
const Stream::Attribute& attrib = attributes.at(channel._slots[i]);
@@ -354,7 +346,7 @@ void GLBackend::updateInput() {
}
}
// everything format related should be in sync now
_needInputFormatUpdate = false;
_input._invalidFormat = false;
}
/* TODO: Fancy version GL4.4
@@ -415,10 +407,10 @@ void GLBackend::updateInput() {
void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
_indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
_input._indexBufferType = (Type) batch._params[paramOffset + 2]._uint;
BufferPointer indexBuffer = batch._buffers.get(batch._params[paramOffset + 1]._uint);
_indexBufferOffset = batch._params[paramOffset + 0]._uint;
_indexBuffer = indexBuffer;
_input._indexBufferOffset = batch._params[paramOffset + 0]._uint;
_input._indexBuffer = indexBuffer;
if (indexBuffer) {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, getBufferID(*indexBuffer));
} else {
@@ -430,30 +422,18 @@ void GLBackend::do_setIndexBuffer(Batch& batch, uint32 paramOffset) {
// Transform Stage
void GLBackend::do_setModelTransform(Batch& batch, uint32 paramOffset) {
TransformPointer modelTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._model.isNull() || (modelTransform != _transform._model)) {
_transform._model = modelTransform;
_transform._invalidModel = true;
}
_transform._model = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidModel = true;
}
void GLBackend::do_setViewTransform(Batch& batch, uint32 paramOffset) {
TransformPointer viewTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._view.isNull() || (viewTransform != _transform._view)) {
_transform._view = viewTransform;
_transform._invalidView = true;
}
_transform._view = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidView = true;
}
void GLBackend::do_setProjectionTransform(Batch& batch, uint32 paramOffset) {
TransformPointer projectionTransform = batch._transforms.get(batch._params[paramOffset]._uint);
if (_transform._projection.isNull() || (projectionTransform != _transform._projection)) {
_transform._projection = projectionTransform;
_transform._invalidProj = true;
}
_transform._projection = batch._transforms.get(batch._params[paramOffset]._uint);
_transform._invalidProj = true;
}
void GLBackend::updateTransform() {
@@ -468,28 +448,28 @@ void GLBackend::updateTransform() {
}
if (_transform._invalidModel || _transform._invalidView) {
if (!_transform._model.isNull()) {
if (!_transform._model.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
if (!_transform._view.isNull()) {
if (!_transform._view.isIdentity()) {
Transform mvx;
Transform::inverseMult(mvx, (*_transform._view), (*_transform._model));
Transform::inverseMult(mvx, _transform._view, _transform._model);
mvx.getMatrix(modelView);
} else {
_transform._model->getMatrix(modelView);
_transform._model.getMatrix(modelView);
}
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
if (!_transform._view.isNull()) {
if (!_transform._view.isIdentity()) {
if (_transform._lastMode != GL_MODELVIEW) {
glMatrixMode(GL_MODELVIEW);
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view->getInverseMatrix(modelView);
_transform._view.getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?


@@ -48,7 +48,7 @@ public:
static const int MAX_NUM_ATTRIBUTES = Stream::NUM_INPUT_SLOTS;
static const int MAX_NUM_INPUT_BUFFERS = 16;
uint32 getNumInputBuffers() const { return _inputBuffersState.size(); }
uint32 getNumInputBuffers() const { return _input._buffersState.size(); }
protected:
@@ -62,22 +62,39 @@ protected:
void do_setInputFormat(Batch& batch, uint32 paramOffset);
void do_setInputBuffer(Batch& batch, uint32 paramOffset);
void do_setIndexBuffer(Batch& batch, uint32 paramOffset);
void updateInput();
bool _needInputFormatUpdate;
Stream::FormatPointer _inputFormat;
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> InputBuffersState;
InputBuffersState _inputBuffersState;
struct InputStageState {
bool _invalidFormat;
Stream::FormatPointer _format;
Buffers _inputBuffers;
Offsets _inputBufferOffsets;
Offsets _inputBufferStrides;
typedef std::bitset<MAX_NUM_INPUT_BUFFERS> BuffersState;
BuffersState _buffersState;
BufferPointer _indexBuffer;
Offset _indexBufferOffset;
Type _indexBufferType;
Buffers _buffers;
Offsets _bufferOffsets;
Offsets _bufferStrides;
typedef std::bitset<MAX_NUM_ATTRIBUTES> InputActivationCache;
InputActivationCache _inputAttributeActivation;
BufferPointer _indexBuffer;
Offset _indexBufferOffset;
Type _indexBufferType;
typedef std::bitset<MAX_NUM_ATTRIBUTES> ActivationCache;
ActivationCache _attributeActivation;
InputStageState() :
_invalidFormat(true),
_format(0),
_buffersState(0),
_buffers(_buffersState.size(), BufferPointer(0)),
_bufferOffsets(_buffersState.size(), 0),
_bufferStrides(_buffersState.size(), 0),
_indexBuffer(0),
_indexBufferOffset(0),
_indexBufferType(UINT32),
_attributeActivation(0)
{}
} _input;
// Transform Stage
void do_setModelTransform(Batch& batch, uint32 paramOffset);
@@ -86,9 +103,9 @@ protected:
void updateTransform();
struct TransformStageState {
TransformPointer _model;
TransformPointer _view;
TransformPointer _projection;
Transform _model;
Transform _view;
Transform _projection;
bool _invalidModel;
bool _invalidView;
bool _invalidProj;
@@ -96,9 +113,9 @@ protected:
GLenum _lastMode;
TransformStageState() :
_model(0),
_view(0),
_projection(0),
_model(),
_view(),
_projection(),
_invalidModel(true),
_invalidView(true),
_invalidProj(true),


@@ -567,11 +567,11 @@ bool Model::renderCore(float alpha, RenderMode mode, RenderArgs* args) {
// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
_transforms.push_back(Transform());
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
_transforms[0] = Application::getInstance()->getViewTransform();
// apply entity translation offset to the viewTransform in one go (it's a preTranslate because viewTransform goes from world to eye space)
_transforms[0]->preTranslate(-_translation);
_transforms[0].preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
@@ -1493,10 +1493,10 @@ void Model::setupBatchTransform(gpu::Batch& batch) {
// Capture the view matrix once for the rendering of this model
if (_transforms.empty()) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
_transforms.push_back(Transform());
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
_transforms[0]->preTranslate(-_translation);
_transforms[0] = Application::getInstance()->getViewTransform();
_transforms[0].preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);
}
@@ -2149,10 +2149,9 @@ int Model::renderMeshes(gpu::Batch& batch, RenderMode mode, bool translucent, fl
if (state.clusterMatrices.size() > 1) {
GLBATCH(glUniformMatrix4fv)(skinLocations->clusterMatrices, state.clusterMatrices.size(), false,
(const float*)state.clusterMatrices.constData());
batch.setModelTransform(gpu::TransformPointer());
batch.setModelTransform(Transform());
} else {
gpu::TransformPointer modelTransform(new gpu::Transform(state.clusterMatrices[0]));
batch.setModelTransform(modelTransform);
batch.setModelTransform(Transform(state.clusterMatrices[0]));
}
if (mesh.blendshapes.isEmpty()) {


@@ -283,7 +283,7 @@ private:
QUrl _url;
gpu::Buffers _blendedVertexBuffers;
gpu::Transforms _transforms;
std::vector<Transform> _transforms;
gpu::Batch _renderBatch;
QVector<QVector<QSharedPointer<Texture> > > _dilatedTextures;


@@ -20,6 +20,8 @@
#include <bitset>
#include <memory>
class Transform {
public:
typedef glm::mat4 Mat4;
@@ -30,16 +32,16 @@ public:
typedef glm::quat Quat;
Transform() :
_translation(0),
_rotation(1.0f, 0, 0, 0),
_scale(1.0f),
_translation(0),
_flags(FLAG_CACHE_INVALID_BITSET) // invalid cache
{
}
Transform(const Transform& transform) :
_translation(transform._translation),
_rotation(transform._rotation),
_scale(transform._scale),
_translation(transform._translation),
_flags(transform._flags)
{
invalidCache();
@@ -49,6 +51,15 @@ public:
}
~Transform() {}
Transform& operator=(const Transform& transform) {
_rotation = transform._rotation;
_scale = transform._scale;
_translation = transform._translation;
_flags = transform._flags;
invalidCache();
return (*this);
}
void setIdentity();
const Vec3& getTranslation() const;
@@ -89,7 +100,6 @@ public:
// Left will be inversed before the multiplication
static Transform& inverseMult(Transform& result, const Transform& left, const Transform& right);
protected:
enum Flag {
@@ -111,14 +121,15 @@ protected:
// TRS
Vec3 _translation;
Quat _rotation;
Vec3 _scale;
Vec3 _translation;
mutable Flags _flags;
// Cached transform
mutable Mat4 _matrix;
// TODO: replace this auto_ptr with a unique_ptr as soon as we are compiling in C++11
mutable std::auto_ptr<Mat4> _matrix;
bool isCacheInvalid() const { return _flags[FLAG_CACHE_INVALID]; }
void validCache() const { _flags.set(FLAG_CACHE_INVALID, false); }
@@ -135,6 +146,7 @@ protected:
void flagNonUniform() { _flags.set(FLAG_NON_UNIFORM, true); }
void updateCache() const;
Mat4& getCachedMatrix(Mat4& result) const;
};
inline void Transform::setIdentity() {
@@ -271,8 +283,25 @@ inline void Transform::postScale(const Vec3& scale) {
}
inline Transform::Mat4& Transform::getMatrix(Transform::Mat4& result) const {
updateCache();
result = _matrix;
if (isRotating()) {
Mat3 rot = glm::mat3_cast(_rotation);
if (isScaling()) {
rot[0] *= _scale.x;
rot[1] *= _scale.y;
rot[2] *= _scale.z;
}
result[0] = Vec4(rot[0], 0.f);
result[1] = Vec4(rot[1], 0.f);
result[2] = Vec4(rot[2], 0.f);
} else {
result[0] = Vec4(_scale.x, 0.f, 0.f, 0.f);
result[1] = Vec4(0.f, _scale.y, 0.f, 0.f);
result[2] = Vec4(0.f, 0.f, _scale.z, 0.f);
}
result[3] = Vec4(_translation, 1.0f);
return result;
}
@@ -369,27 +398,18 @@ inline Transform& Transform::inverseMult( Transform& result, const Transform& le
return result;
}
inline Transform::Mat4& Transform::getCachedMatrix(Transform::Mat4& result) const {
updateCache();
result = (*_matrix);
return result;
}
inline void Transform::updateCache() const {
if (isCacheInvalid()) {
if (isRotating()) {
Mat3 rot = glm::mat3_cast(_rotation);
if (isScaling()) {
rot[0] *= _scale.x;
rot[1] *= _scale.y;
rot[2] *= _scale.z;
}
_matrix[0] = Vec4(rot[0], 0.f);
_matrix[1] = Vec4(rot[1], 0.f);
_matrix[2] = Vec4(rot[2], 0.f);
} else {
_matrix[0] = Vec4(_scale.x, 0.f, 0.f, 0.f);
_matrix[1] = Vec4(0.f, _scale.y, 0.f, 0.f);
_matrix[2] = Vec4(0.f, 0.f, _scale.z, 0.f);
if (!_matrix.get()) {
_matrix.reset(new Mat4());
}
_matrix[3] = Vec4(_translation, 1.0f);
getMatrix((*_matrix));
validCache();
}
}
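
The cached matrix above now lives behind a lazily allocated std::auto_ptr and is rebuilt by getMatrix() whenever the cache flag is invalid; the TODO notes it should become a unique_ptr once C++11 is available. A stand-alone sketch of that lazy-cache idea using std::unique_ptr (illustrative names only, and only the translation is folded in here; the real Transform also applies rotation and scale):

#include <memory>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// The matrix storage is allocated on first use and recomputed only after a
// setter has flagged the cache as dirty, mirroring Transform::updateCache() above.
class CachedTransform {
public:
    void setTranslation(const glm::vec3& t) { _translation = t; _dirty = true; }

    const glm::mat4& getMatrix() const {
        if (!_matrix) {
            _matrix.reset(new glm::mat4(1.0f));          // lazy allocation
        }
        if (_dirty) {
            *_matrix = glm::translate(glm::mat4(1.0f), _translation);
            _dirty = false;
        }
        return *_matrix;
    }

private:
    glm::vec3 _translation = glm::vec3(0.0f);
    mutable std::unique_ptr<glm::mat4> _matrix;          // built only when requested
    mutable bool _dirty = true;
};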