Mirror of https://github.com/overte-org/overte.git (synced 2025-04-20 01:24:03 +02:00)
Merge pull request #411 from ey6es/master

Fix for starfield rendering on Oculus.

commit 05ad7309a1
5 changed files with 136 additions and 161 deletions
@@ -283,127 +283,87 @@ void Application::paintGL() {
    glEnable(GL_LINE_SMOOTH);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);

    glPushMatrix(); {
        glLoadIdentity();

        if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
            _myCamera.setTightness (100.0f);
            _myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
            _myCamera.setTargetRotation(_myAvatar.getBodyYaw() - 180.0f,
                0.0f,
                0.0f);
        } else if (OculusManager::isConnected()) {
            _myCamera.setUpShift (0.0f);
            _myCamera.setDistance (0.0f);
            _myCamera.setTightness (100.0f);
            _myCamera.setTargetPosition(_myAvatar.getHeadPosition());
            _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
                _myAvatar.getHead().getPitch(),
                -_myAvatar.getHead().getRoll());
        } else if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
            _myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
            _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
                _myAvatar.getAbsoluteHeadPitch(),
                0.0f);
            // Take a look at whether we are inside head, don't render it if so.
            const float HEAD_RENDER_DISTANCE = 0.5;
            glm::vec3 distanceToHead(_myCamera.getPosition() - _myAvatar.getSpringyHeadPosition());

            if (glm::length(distanceToHead) < HEAD_RENDER_DISTANCE) {
            }
        } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
            _myCamera.setTargetPosition(_myAvatar.getHeadPosition());
            _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
                _myAvatar.getAbsoluteHeadPitch(),
                0.0f);
        }

        // important...
        _myCamera.update( 1.f/_fps );

        // Render anything (like HUD items) that we want to be in 3D but not in worldspace
        /*
        const float HUD_Z_OFFSET = -5.f;
        glPushMatrix();
        glm::vec3 test(0.5, 0.5, 0.5);
        glTranslatef(1, 1, HUD_Z_OFFSET);
        drawVector(&test);
        glPopMatrix();
        */

        // Note: whichCamera is used to pick between the normal camera myCamera for our
        // main camera, vs, an alternate camera. The alternate camera we support right now
        // is the viewFrustumOffsetCamera. But theoretically, we could use this same mechanism
        // to add other cameras.
        //
        // Why have two cameras? Well, one reason is that because in the case of the renderViewFrustum()
        // code, we want to keep the state of "myCamera" intact, so we can render what the view frustum of
        // myCamera is. But we also want to do meaningful camera transforms on OpenGL for the offset camera
        Camera whichCamera = _myCamera;

        if (_viewFrustumFromOffset->isChecked() && _frustumOn->isChecked()) {

            // set the camera to third-person view but offset so we can see the frustum
            _viewFrustumOffsetCamera.setTargetYaw(_viewFrustumOffsetYaw + _myAvatar.getBodyYaw());
            _viewFrustumOffsetCamera.setPitch (_viewFrustumOffsetPitch );
            _viewFrustumOffsetCamera.setRoll (_viewFrustumOffsetRoll );
            _viewFrustumOffsetCamera.setUpShift (_viewFrustumOffsetUp );
            _viewFrustumOffsetCamera.setDistance (_viewFrustumOffsetDistance);
            _viewFrustumOffsetCamera.update(1.f/_fps);
            whichCamera = _viewFrustumOffsetCamera;
        }

        // transform by eye offset

        glm::vec3 eyeOffsetPos = whichCamera.getEyeOffsetPosition();
        glm::quat eyeOffsetOrient = whichCamera.getEyeOffsetOrientation();
        glm::vec3 eyeOffsetAxis = glm::axis(eyeOffsetOrient);
        glRotatef(-glm::angle(eyeOffsetOrient), eyeOffsetAxis.x, eyeOffsetAxis.y, eyeOffsetAxis.z);
        glTranslatef(-eyeOffsetPos.x, -eyeOffsetPos.y, -eyeOffsetPos.z);

        // transform view according to whichCamera
        // could be myCamera (if in normal mode)
        // or could be viewFrustumOffsetCamera if in offset mode
        // I changed the ordering here - roll is FIRST (JJV)

        glRotatef ( whichCamera.getRoll(), IDENTITY_FRONT.x, IDENTITY_FRONT.y, IDENTITY_FRONT.z);
        glRotatef ( whichCamera.getPitch(), IDENTITY_RIGHT.x, IDENTITY_RIGHT.y, IDENTITY_RIGHT.z);
        glRotatef (180.0 - whichCamera.getYaw(), IDENTITY_UP.x, IDENTITY_UP.y, IDENTITY_UP.z );

        glTranslatef(-whichCamera.getPosition().x, -whichCamera.getPosition().y, -whichCamera.getPosition().z);

        // Setup 3D lights (after the camera transform, so that they are positioned in world space)
        glEnable(GL_COLOR_MATERIAL);
        glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);

        glm::vec3 relativeSunLoc = glm::normalize(_environment.getClosestData(whichCamera.getPosition()).getSunLocation() -
            whichCamera.getPosition());
        GLfloat light_position0[] = { relativeSunLoc.x, relativeSunLoc.y, relativeSunLoc.z, 0.0 };
        glLightfv(GL_LIGHT0, GL_POSITION, light_position0);
        GLfloat ambient_color[] = { 0.7, 0.7, 0.8 };
        glLightfv(GL_LIGHT0, GL_AMBIENT, ambient_color);
        GLfloat diffuse_color[] = { 0.8, 0.7, 0.7 };
        glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse_color);
        GLfloat specular_color[] = { 1.0, 1.0, 1.0, 1.0};
        glLightfv(GL_LIGHT0, GL_SPECULAR, specular_color);

        glMaterialfv(GL_FRONT, GL_SPECULAR, specular_color);
        glMateriali(GL_FRONT, GL_SHININESS, 96);

        if (_oculusOn->isChecked()) {
            displayOculus(whichCamera);

        } else {
            displaySide(whichCamera);
            glPopMatrix();

            displayOverlay();

    if (_myCamera.getMode() == CAMERA_MODE_MIRROR) {
        _myCamera.setTightness (100.0f);
        _myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
        _myCamera.setTargetRotation(_myAvatar.getBodyYaw() - 180.0f,
            0.0f,
            0.0f);
    } else if (OculusManager::isConnected()) {
        _myCamera.setUpShift (0.0f);
        _myCamera.setDistance (0.0f);
        _myCamera.setTightness (100.0f);
        _myCamera.setTargetPosition(_myAvatar.getHeadPosition());
        _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
            _myAvatar.getHead().getPitch(),
            -_myAvatar.getHead().getRoll());
    } else if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
        _myCamera.setTargetPosition(_myAvatar.getSpringyHeadPosition());
        _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
            _myAvatar.getAbsoluteHeadPitch(),
            0.0f);
        // Take a look at whether we are inside head, don't render it if so.
        const float HEAD_RENDER_DISTANCE = 0.5;
        glm::vec3 distanceToHead(_myCamera.getPosition() - _myAvatar.getSpringyHeadPosition());

        if (glm::length(distanceToHead) < HEAD_RENDER_DISTANCE) {
        }
    } else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
        _myCamera.setTargetPosition(_myAvatar.getHeadPosition());
        _myCamera.setTargetRotation(_myAvatar.getAbsoluteHeadYaw(),
            _myAvatar.getAbsoluteHeadPitch(),
            0.0f);
    }

    // important...
    _myCamera.update( 1.f/_fps );

    // Render anything (like HUD items) that we want to be in 3D but not in worldspace
    /*
    const float HUD_Z_OFFSET = -5.f;
    glPushMatrix();
    glm::vec3 test(0.5, 0.5, 0.5);
    glTranslatef(1, 1, HUD_Z_OFFSET);
    drawVector(&test);
    glPopMatrix();
    */

    // Note: whichCamera is used to pick between the normal camera myCamera for our
    // main camera, vs, an alternate camera. The alternate camera we support right now
    // is the viewFrustumOffsetCamera. But theoretically, we could use this same mechanism
    // to add other cameras.
    //
    // Why have two cameras? Well, one reason is that because in the case of the renderViewFrustum()
    // code, we want to keep the state of "myCamera" intact, so we can render what the view frustum of
    // myCamera is. But we also want to do meaningful camera transforms on OpenGL for the offset camera
    Camera whichCamera = _myCamera;

    if (_viewFrustumFromOffset->isChecked() && _frustumOn->isChecked()) {

        // set the camera to third-person view but offset so we can see the frustum
        _viewFrustumOffsetCamera.setTargetYaw(_viewFrustumOffsetYaw + _myAvatar.getBodyYaw());
        _viewFrustumOffsetCamera.setPitch (_viewFrustumOffsetPitch );
        _viewFrustumOffsetCamera.setRoll (_viewFrustumOffsetRoll );
        _viewFrustumOffsetCamera.setUpShift (_viewFrustumOffsetUp );
        _viewFrustumOffsetCamera.setDistance (_viewFrustumOffsetDistance);
        _viewFrustumOffsetCamera.update(1.f/_fps);
        whichCamera = _viewFrustumOffsetCamera;
    }

    if (_oculusOn->isChecked()) {
        displayOculus(whichCamera);

    } else {
        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        glLoadIdentity();
        displaySide(whichCamera);
        glPopMatrix();

        displayOverlay();
    }

    _frameCount++;
}
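
Note on the "transform by eye offset" lines above: the glRotatef/glTranslatef pair applies the inverse of the camera's eye-offset pose (rotate by the negated quaternion angle about its axis, then translate by the negated offset position). A minimal standalone sketch of the equivalent matrix math follows; the helper name is hypothetical, and it assumes a GLM build where glm::angle()/glm::rotate() take radians (older GLM releases used degrees, which is what glRotatef itself expects).

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Builds the same transform as the glRotatef/glTranslatef pair above:
    // the inverse of "translate(eyeOffsetPos) * rotate(eyeOffsetOrient)".
    glm::mat4 eyeOffsetViewTransform(const glm::vec3& eyeOffsetPos, const glm::quat& eyeOffsetOrient) {
        glm::mat4 m(1.0f);
        m = glm::rotate(m, -glm::angle(eyeOffsetOrient), glm::axis(eyeOffsetOrient));
        m = glm::translate(m, -eyeOffsetPos);
        return m;
    }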
@@ -1042,6 +1002,7 @@ void Application::setNoise(bool noise) {
void Application::setFullscreen(bool fullscreen) {
    _window->setWindowState(fullscreen ? (_window->windowState() | Qt::WindowFullScreen) :
        (_window->windowState() & ~Qt::WindowFullScreen));
    updateCursor();
}

void Application::setRenderFirstPerson(bool firstPerson) {
@@ -1050,6 +1011,7 @@ void Application::setRenderFirstPerson(bool firstPerson) {

void Application::setOculus(bool oculus) {
    resizeGL(_glWidget->width(), _glWidget->height());
    updateCursor();
}

void Application::setFrustumOffset(bool frustumOffset) {
@@ -1209,7 +1171,6 @@ void Application::initMenu() {
    _renderVoxels->setChecked(true);
    _renderVoxels->setShortcut(Qt::Key_V);
    (_renderVoxelTextures = renderMenu->addAction("Voxel Textures"))->setCheckable(true);
    _renderVoxelTextures->setChecked(true);
    (_renderStarsOn = renderMenu->addAction("Stars"))->setCheckable(true);
    _renderStarsOn->setChecked(true);
    _renderStarsOn->setShortcut(Qt::Key_Asterisk);
@@ -1532,10 +1493,13 @@ void Application::displayOculus(Camera& whichCamera) {
    glTranslatef(0.151976, 0, 0); // +h, see Oculus SDK docs p. 26
    gluPerspective(whichCamera.getFieldOfView(), whichCamera.getAspectRatio(),
        whichCamera.getNearClip(), whichCamera.getFarClip());

    glViewport(0, 0, _glWidget->width() / 2, _glWidget->height());
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glLoadIdentity();
    glTranslatef(0.032, 0, 0); // dip/2, see p. 27

    glMatrixMode(GL_MODELVIEW);
    glViewport(0, 0, _glWidget->width() / 2, _glWidget->height());
    displaySide(whichCamera);

    // and the right eye to the right side
@@ -1544,10 +1508,12 @@ void Application::displayOculus(Camera& whichCamera) {
    glTranslatef(-0.151976, 0, 0); // -h
    gluPerspective(whichCamera.getFieldOfView(), whichCamera.getAspectRatio(),
        whichCamera.getNearClip(), whichCamera.getFarClip());

    glViewport(_glWidget->width() / 2, 0, _glWidget->width() / 2, _glWidget->height());
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(-0.032, 0, 0);

    glMatrixMode(GL_MODELVIEW);
    glViewport(_glWidget->width() / 2, 0, _glWidget->width() / 2, _glWidget->height());
    displaySide(whichCamera);

    glPopMatrix();
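
A note on the two constants in the stereo pass above: 0.032 is half of the 0.064 m default interpupillary distance, and 0.151976 matches the projection-center offset "h" computed from the DK1 screen and lens parameters in the Oculus SDK docs that the comment cites. A hedged sketch of that arithmetic (the parameter names and values below are the SDK's published DK1 defaults, assumed for illustration, not taken from this diff):

    #include <cstdio>

    int main() {
        // Oculus Rift DK1 display parameters (assumed here for illustration).
        const float hScreenSize            = 0.14976f; // horizontal screen size in meters
        const float lensSeparationDistance = 0.0635f;  // distance between the lens centers
        const float interpupillaryDistance = 0.064f;   // default IPD

        // Offset of each eye's projection center from the middle of its half of
        // the screen, in normalized units (the "+h" in the comment above).
        float viewCenter = hScreenSize * 0.25f;
        float eyeProjectionShift = viewCenter - lensSeparationDistance * 0.5f;
        float h = 4.0f * eyeProjectionShift / hScreenSize;

        printf("h     = %f (the code uses 0.151976)\n", h);
        printf("ipd/2 = %f (the code uses 0.032)\n", interpupillaryDistance * 0.5f);
        return 0;
    }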
@@ -1633,7 +1599,42 @@ void Application::displayOculus(Camera& whichCamera) {
}

void Application::displaySide(Camera& whichCamera) {
    glPushMatrix();
    // transform by eye offset

    glm::vec3 eyeOffsetPos = whichCamera.getEyeOffsetPosition();
    glm::quat eyeOffsetOrient = whichCamera.getEyeOffsetOrientation();
    glm::vec3 eyeOffsetAxis = glm::axis(eyeOffsetOrient);
    glRotatef(-glm::angle(eyeOffsetOrient), eyeOffsetAxis.x, eyeOffsetAxis.y, eyeOffsetAxis.z);
    glTranslatef(-eyeOffsetPos.x, -eyeOffsetPos.y, -eyeOffsetPos.z);

    // transform view according to whichCamera
    // could be myCamera (if in normal mode)
    // or could be viewFrustumOffsetCamera if in offset mode
    // I changed the ordering here - roll is FIRST (JJV)

    glRotatef ( whichCamera.getRoll(), IDENTITY_FRONT.x, IDENTITY_FRONT.y, IDENTITY_FRONT.z);
    glRotatef ( whichCamera.getPitch(), IDENTITY_RIGHT.x, IDENTITY_RIGHT.y, IDENTITY_RIGHT.z);
    glRotatef (180.0 - whichCamera.getYaw(), IDENTITY_UP.x, IDENTITY_UP.y, IDENTITY_UP.z );

    glTranslatef(-whichCamera.getPosition().x, -whichCamera.getPosition().y, -whichCamera.getPosition().z);

    // Setup 3D lights (after the camera transform, so that they are positioned in world space)
    glEnable(GL_COLOR_MATERIAL);
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);

    glm::vec3 relativeSunLoc = glm::normalize(_environment.getClosestData(whichCamera.getPosition()).getSunLocation() -
        whichCamera.getPosition());
    GLfloat light_position0[] = { relativeSunLoc.x, relativeSunLoc.y, relativeSunLoc.z, 0.0 };
    glLightfv(GL_LIGHT0, GL_POSITION, light_position0);
    GLfloat ambient_color[] = { 0.7, 0.7, 0.8 };
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambient_color);
    GLfloat diffuse_color[] = { 0.8, 0.7, 0.7 };
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse_color);
    GLfloat specular_color[] = { 1.0, 1.0, 1.0, 1.0};
    glLightfv(GL_LIGHT0, GL_SPECULAR, specular_color);

    glMaterialfv(GL_FRONT, GL_SPECULAR, specular_color);
    glMateriali(GL_FRONT, GL_SHININESS, 96);

    if (_renderStarsOn->isChecked()) {
        // should be the first rendering pass - w/o depth buffer / lighting
@@ -1722,8 +1723,6 @@ void Application::displaySide(Camera& whichCamera) {

    // brad's frustum for debugging
    if (_frustumOn->isChecked()) renderViewFrustum(_viewFrustum);

    glPopMatrix();
}

void Application::displayOverlay() {
@@ -2083,6 +2082,11 @@ void Application::setMenuShortcutsEnabled(bool enabled) {
    setShortcutsEnabled(_window->menuBar(), enabled);
}

void Application::updateCursor() {
    _glWidget->setCursor(_oculusOn->isChecked() && _window->windowState().testFlag(Qt::WindowFullScreen) ?
        Qt::BlankCursor : Qt::ArrowCursor);
}

// when QActionGroup is set to non-exclusive, it doesn't return anything as checked;
// hence, we must check ourselves
QAction* Application::checkedVoxelModeAction() const {
@@ -128,6 +128,8 @@ private:

    void setMenuShortcutsEnabled(bool enabled);

    void updateCursor();

    QAction* checkedVoxelModeAction() const;

    static void attachNewHeadToAgent(Agent *newAgent);
@@ -116,19 +116,6 @@ namespace starfield {

        float halfPersp = perspective * 0.5f;

        // define diagonal and near distance
        float halfDiag = std::sin(halfPersp);
        float nearClip = std::cos(halfPersp);

        // determine half dimensions based on the screen diagonal
        //
        // ww + hh = dd
        // a = w / h => w = ha
        // hh + hh aa = dd
        // hh = dd / (1 + aa)
        float hh = sqrt(halfDiag * halfDiag / (1.0f + aspect * aspect));
        float hw = hh * aspect;

        // cancel all translation
        mat4 matrix = orientation;
        matrix[3][0] = 0.0f;
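
The comment block above derives the half-height of the view rectangle from the screen half-diagonal and the aspect ratio: with d^2 = w^2 + h^2 and w = a*h, it follows that h = sqrt(d^2 / (1 + a^2)). A small self-contained check of that algebra (the aspect ratio and perspective angle below are arbitrary example values, not taken from this diff):

    #include <cassert>
    #include <cmath>

    int main() {
        float aspect    = 4.0f / 3.0f;        // example aspect ratio
        float halfPersp = 0.5f * 0.7853982f;  // half of a ~45 degree perspective angle

        float halfDiag = std::sin(halfPersp);
        float hh = std::sqrt(halfDiag * halfDiag / (1.0f + aspect * aspect));
        float hw = hh * aspect;

        // hw^2 + hh^2 should reproduce the squared half-diagonal.
        assert(std::fabs(hw * hw + hh * hh - halfDiag * halfDiag) < 1e-6f);
        return 0;
    }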
@@ -149,15 +136,14 @@
#endif

#if STARFIELD_DEBUG_CULLING
        mat4 matrix_debug = glm::translate(glm::frustum(-hw, hw, -hh, hh, nearClip, 10.0f),
            vec3(0.0f, 0.0f, -4.0f)) *
        mat4 matrix_debug = glm::translate(vec3(0.0f, 0.0f, -4.0f)) *
            glm::affineInverse(matrix);
#endif

        matrix = glm::frustum(-hw,hw, -hh,hh, nearClip,10.0f) * glm::affineInverse(matrix);
        matrix = glm::affineInverse(matrix);

        this->_outIndexPos = (unsigned*) _batchOffs;
        this->_wRowVec = vec3(row(matrix, 3));
        this->_wRowVec = -vec3(row(matrix, 2));
        this->_halfPerspectiveAngle = halfPersp;
        this->_minBright = minBright;
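
The switch from vec3(row(matrix, 3)) to -vec3(row(matrix, 2)) appears to read the view direction out of the now purely affine inverse-orientation matrix instead of out of a combined projection*view matrix: for a rotation-style view matrix, the negated third row is the camera's forward axis in world space. A minimal check of that matrix identity using glm::lookAt (an illustration only, not code from the renderer):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_access.hpp>     // glm::row
    #include <glm/gtc/matrix_transform.hpp>  // glm::lookAt
    #include <cassert>

    int main() {
        glm::vec3 eye(1.0f, 2.0f, 3.0f);
        glm::vec3 forward = glm::normalize(glm::vec3(0.3f, -0.2f, -1.0f));

        glm::mat4 view = glm::lookAt(eye, eye + forward, glm::vec3(0.0f, 1.0f, 0.0f));

        // Row 2 of a view matrix is (-forward, dot(forward, eye)); negating the
        // xyz part recovers the world-space look direction.
        glm::vec3 recovered = -glm::vec3(glm::row(view, 2));
        assert(glm::length(recovered - forward) < 1e-5f);
        return 0;
    }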
@@ -498,13 +484,7 @@ namespace starfield {
        glDisable(GL_DEPTH_TEST);
        glDisable(GL_LIGHTING);

        // setup modelview matrix (identity)
        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        glLoadIdentity();

        // set projection matrix
        glMatrixMode(GL_PROJECTION);
        // setup modelview matrix
        glPushMatrix();
        glLoadMatrixf(matrix);
@@ -529,8 +509,7 @@ namespace starfield {
        _program.release();
        glDisable(GL_VERTEX_PROGRAM_POINT_SIZE);
        glDisable(GL_POINT_SMOOTH);
        glPopMatrix();
        glMatrixMode(GL_MODELVIEW);

        glPopMatrix();
    }
@@ -27,12 +27,10 @@ ViewFrustum::ViewFrustum() :
    _aspectRatio(1.0),
    _nearClip(0.1),
    _farClip(500.0),
    _farCenter(glm::vec3(0,0,0)),
    _farTopLeft(glm::vec3(0,0,0)),
    _farTopRight(glm::vec3(0,0,0)),
    _farBottomLeft(glm::vec3(0,0,0)),
    _farBottomRight(glm::vec3(0,0,0)),
    _nearCenter(glm::vec3(0,0,0)),
    _nearTopLeft(glm::vec3(0,0,0)),
    _nearTopRight(glm::vec3(0,0,0)),
    _nearBottomLeft(glm::vec3(0,0,0)),
@@ -123,8 +121,6 @@ void ViewFrustum::dump() const {
    printLog("eyeOffsetOrientation=%f,%f,%f,%f\n", _eyeOffsetOrientation.x, _eyeOffsetOrientation.y,
        _eyeOffsetOrientation.z, _eyeOffsetOrientation.w);

    printLog("farCenter.x=%f, farCenter.y=%f, farCenter.z=%f\n",
        _farCenter.x, _farCenter.y, _farCenter.z);
    printLog("farTopLeft.x=%f, farTopLeft.y=%f, farTopLeft.z=%f\n",
        _farTopLeft.x, _farTopLeft.y, _farTopLeft.z);
    printLog("farTopRight.x=%f, farTopRight.y=%f, farTopRight.z=%f\n",
@@ -134,8 +130,6 @@ void ViewFrustum::dump() const {
    printLog("farBottomRight.x=%f, farBottomRight.y=%f, farBottomRight.z=%f\n",
        _farBottomRight.x, _farBottomRight.y, _farBottomRight.z);

    printLog("nearCenter.x=%f, nearCenter.y=%f, nearCenter.z=%f\n",
        _nearCenter.x, _nearCenter.y, _nearCenter.z);
    printLog("nearTopLeft.x=%f, nearTopLeft.y=%f, nearTopLeft.z=%f\n",
        _nearTopLeft.x, _nearTopLeft.y, _nearTopLeft.z);
    printLog("nearTopRight.x=%f, nearTopRight.y=%f, nearTopRight.z=%f\n",
@@ -38,12 +38,10 @@ private:
    glm::vec3 _offsetDirection;
    glm::vec3 _offsetUp;
    glm::vec3 _offsetRight;
    glm::vec3 _farCenter;
    glm::vec3 _farTopLeft;
    glm::vec3 _farTopRight;
    glm::vec3 _farBottomLeft;
    glm::vec3 _farBottomRight;
    glm::vec3 _nearCenter;
    glm::vec3 _nearTopLeft;
    glm::vec3 _nearTopRight;
    glm::vec3 _nearBottomLeft;
@@ -87,13 +85,11 @@ public:
    const glm::vec3& getOffsetUp() const { return _offsetUp; };
    const glm::vec3& getOffsetRight() const { return _offsetRight; };

    const glm::vec3& getFarCenter() const { return _farCenter; };
    const glm::vec3& getFarTopLeft() const { return _farTopLeft; };
    const glm::vec3& getFarTopRight() const { return _farTopRight; };
    const glm::vec3& getFarBottomLeft() const { return _farBottomLeft; };
    const glm::vec3& getFarBottomRight() const { return _farBottomRight; };

    const glm::vec3& getNearCenter() const { return _nearCenter; };
    const glm::vec3& getNearTopLeft() const { return _nearTopLeft; };
    const glm::vec3& getNearTopRight() const { return _nearTopRight; };
    const glm::vec3& getNearBottomLeft() const { return _nearBottomLeft; };