Defined the ViewTransform as the matrix transforming from eye space to world space (the opposite of the convention before this commit), since it seems a better design.

This commit is contained in:
Sam Gateau 2014-11-12 16:40:45 -08:00
parent 7fa4ea7527
commit 8fb04af59d
6 changed files with 60 additions and 18 deletions

View file

@ -839,12 +839,14 @@ void Application::controlledBroadcastToNodes(const QByteArray& packet, const Nod
}
bool Application::event(QEvent* event) {
// handle custom URL
if (event->type() == QEvent::FileOpen) {
QFileOpenEvent* fileEvent = static_cast<QFileOpenEvent*>(event);
if (fileEvent->url().isValid()) {
openUrl(fileEvent->url());
if (!fileEvent->url().isEmpty()) {
AddressManager::getInstance().handleLookupString(fileEvent->url().toLocalFile());
}
return false;
@ -2899,14 +2901,16 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
glFrontFace(GL_CCW);
}
glm::vec3 eyeOffsetPos = whichCamera.getEyeOffsetPosition();
/* glm::vec3 eyeOffsetPos = whichCamera.getEyeOffsetPosition();
glm::quat eyeOffsetOrient = whichCamera.getEyeOffsetOrientation();
glm::vec3 eyeOffsetAxis = glm::axis(eyeOffsetOrient);
glRotatef(-glm::degrees(glm::angle(eyeOffsetOrient)), eyeOffsetAxis.x, eyeOffsetAxis.y, eyeOffsetAxis.z);
viewTransform.postRotate(glm::conjugate(eyeOffsetOrient));
// viewTransform.postRotate(glm::conjugate(eyeOffsetOrient));
viewTransform.preRotate(eyeOffsetOrient);
glTranslatef(-eyeOffsetPos.x, -eyeOffsetPos.y, -eyeOffsetPos.z);
viewTransform.postTranslate(-eyeOffsetPos);
//viewTransform.postTranslate(-eyeOffsetPos);
viewTransform.preTranslate(eyeOffsetPos);
*/
// transform view according to whichCamera
// could be myCamera (if in normal mode)
// or could be viewFrustumOffsetCamera if in offset mode
@ -2914,14 +2918,16 @@ void Application::displaySide(Camera& whichCamera, bool selfAvatarOnly) {
glm::quat rotation = whichCamera.getRotation();
glm::vec3 axis = glm::axis(rotation);
glRotatef(-glm::degrees(glm::angle(rotation)), axis.x, axis.y, axis.z);
viewTransform.postRotate(glm::conjugate(rotation));
// viewTransform.postRotate(glm::conjugate(rotation));
viewTransform.setRotation(rotation);
// store view matrix without translation, which we'll use for precision-sensitive objects
updateUntranslatedViewMatrix(-whichCamera.getPosition());
// Equivalent to what is happening with _untranslatedViewMatrix and the _viewMatrixTranslation
// the viewTransofmr object is updated with the correct value and saved,
// the viewTransform object is updated with the correct value and saved,
// this is what is used for rendering the Entities and avatars
viewTransform.setTranslation(whichCamera.getPosition());
setViewTransform(viewTransform);
glTranslatef(_viewMatrixTranslation.x, _viewMatrixTranslation.y, _viewMatrixTranslation.z);

View file

@ -82,6 +82,11 @@ public:
void setIndexBuffer(Type type, const BufferPointer& buffer, Offset offset);
// Transform Stage
// Vertex position is transformed by ModelTransform from object space to world space
// Then by the inverse of the ViewTransform from world space to eye space
// finally projected into clip space by the projection transform
// WARNING: the ViewTransform transforms from eye space to world space; its inverse is composed
// with the ModelTransform to create the equivalent of the glModelViewMatrix
void setModelTransform(const TransformPointer& model);
void setViewTransform(const TransformPointer& view);
void setProjectionTransform(const TransformPointer& proj);

View file

@ -476,7 +476,7 @@ void GLBackend::updateTransform() {
Transform::Mat4 modelView;
if (!_transform._view.isNull()) {
Transform mvx;
Transform::mult(mvx, (*_transform._view), (*_transform._model));
Transform::inverseMult(mvx, (*_transform._view), (*_transform._model));
mvx.getMatrix(modelView);
} else {
_transform._model->getMatrix(modelView);
@ -489,7 +489,7 @@ void GLBackend::updateTransform() {
_transform._lastMode = GL_MODELVIEW;
}
Transform::Mat4 modelView;
_transform._view->getMatrix(modelView);
_transform._view->getInverseMatrix(modelView);
glLoadMatrixf(reinterpret_cast< const GLfloat* >(&modelView));
} else {
// TODO: eventually do something about the matrix when neither view nor model is specified?

View file

@ -560,8 +560,8 @@ bool Model::render(float alpha, RenderMode mode, RenderArgs* args) {
_transforms.push_back(gpu::TransformPointer(new gpu::Transform()));
}
(*_transforms[0]) = gpu::Transform((*Application::getInstance()->getViewTransform()));
// apply entity translation + camera position offset to the viewTransform in one go
_transforms[0]->postTranslate(Application::getInstance()->getViewMatrixTranslation() + _translation);
// apply entity translation offset to the viewTransform in one go (it's a preTranslate because the viewTransform now goes from eye space to world space; its inverse, applied at render time, picks up the offset)
_transforms[0]->preTranslate(-_translation);
batch.setViewTransform(_transforms[0]);

View file

@ -27,12 +27,6 @@ void Transform::evalRotationScale(Quat& rotation, Vec3& scale, const Mat3& rotat
Mat3 currInvTranspose = glm::inverse(glm::transpose(rotationMat));
Mat3 nextRotation = 0.5f * (rotationMat + currInvTranspose);
// Go through every component in the matrices and find the next matrix
/* for (int i = 0; i < 3; i++) {
for (int j = 0; j <3; j++) {
nextRotation[j][i] = 0.5f * (rotationMat[j][i] + currInvTranspose[j][i]);
}
}*/
norm = 0.0;
for (int i = 0; i < 3; i++) {

View file

@ -78,12 +78,17 @@ public:
void evalFromRawMatrix(const Mat3& rotationScalematrix);
Mat4& getMatrix(Mat4& result) const;
Mat4& getInverseMatrix(Mat4& result) const;
Transform& evalInverse(Transform& result) const;
static void evalRotationScale(Quat& rotation, Vec3& scale, const Mat3& rotationScaleMatrix);
static Transform& mult(Transform& result, const Transform& left, const Transform& right);
// The 'left' transform is inverted before the multiplication
static Transform& inverseMult(Transform& result, const Transform& left, const Transform& right);
protected:
@ -271,6 +276,10 @@ inline Transform::Mat4& Transform::getMatrix(Transform::Mat4& result) const {
return result;
}
inline Transform::Mat4& Transform::getInverseMatrix(Transform::Mat4& result) const {
    // Fills 'result' with the 4x4 matrix of this transform's inverse and returns it.
    // NOTE: evalInverse() takes a non-const Transform& (see its declaration), so
    // passing the temporary Transform() is ill-formed in standard C++ — it only
    // compiles as a compiler extension (e.g. MSVC). Use a named local instead.
    Transform inverse;
    return evalInverse(inverse).getMatrix(result);
}
inline void Transform::evalFromRawMatrix(const Mat4& matrix) {
// for now works only in the case of TRS transformation
if ((matrix[0][3] == 0) && (matrix[1][3] == 0) && (matrix[2][3] == 0) && (matrix[3][3] == 1.f)) {
@ -330,6 +339,34 @@ inline Transform& Transform::mult( Transform& result, const Transform& left, con
return result;
}
inline Transform& Transform::inverseMult( Transform& result, const Transform& left, const Transform& right) {
// Computes result = inverse(left) * right without building any matrix.
// Each Transform factors as Translate * Rotate * Scale, so
// inverse(left) = S^-1 * R^-1 * T^-1; composing with right = Tr * Rr * Sr
// collapses the two translations into the single (right - left) delta below.
result.setIdentity();
if (left.isScaling()) {
// Reciprocal of left's scale — assumes all components are non-zero.
const Vec3& s = left.getScale();
result.setScale(Vec3(1.0f / s.x, 1.0f / s.y, 1.0f / s.z));
}
if (left.isRotating()) {
// Conjugate of a unit quaternion is its inverse rotation.
result.postRotate(glm::conjugate(left.getRotation()));
}
if (left.isTranslating() || right.isTranslating()) {
result.postTranslate(right.getTranslation() - left.getTranslation());
}
if (right.isRotating()) {
result.postRotate(right.getRotation());
}
if (right.isScaling()) {
result.postScale(right.getScale());
}
// HACK: to sanity-check this composition, compare it against the brute-force
// matrix product — the two should match (note getMatrix/getInverseMatrix take
// a Mat4& out-parameter, and 'left' must be inverted in the reference product):
// Transform::Mat4 li, rm, check; left.getInverseMatrix(li); right.getMatrix(rm);
// Transform::Mat4 mv = li * rm; result.getMatrix(check); // mv == check expected
return result;
}
inline void Transform::updateCache() const {
if (isCacheInvalid()) {
if (isRotating()) {