Merge pull request from PhilipRosedale/master

Fix bug with first person camera being laggy when flying
Stephen Birarda 2014-08-12 14:42:07 -07:00
commit 1b5c229dd6
5 changed files with 48 additions and 12 deletions


@@ -599,7 +599,7 @@ void Application::paintGL() {
if (_myCamera.getMode() == CAMERA_MODE_FIRST_PERSON) {
_myCamera.setTightness(0.0f); // In first person, camera follows (untweaked) head exactly without delay
- _myCamera.setTargetPosition(_myAvatar->getHead()->getFilteredEyePosition());
+ _myCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition());
_myCamera.setTargetRotation(_myAvatar->getHead()->getCameraOrientation());
} else if (_myCamera.getMode() == CAMERA_MODE_THIRD_PERSON) {
@@ -621,7 +621,7 @@ void Application::paintGL() {
_myCamera.setTargetPosition(_myAvatar->getHead()->getEyePosition() + glm::vec3(0, _raiseMirror * _myAvatar->getScale(), 0));
} else {
_myCamera.setTightness(0.0f);
- glm::vec3 eyePosition = _myAvatar->getHead()->getFilteredEyePosition();
+ glm::vec3 eyePosition = _myAvatar->getHead()->getEyePosition();
float headHeight = eyePosition.y - _myAvatar->getPosition().y;
_myCamera.setDistance(MIRROR_FULLSCREEN_DISTANCE * _scaleMirror);
_myCamera.setTargetPosition(_myAvatar->getPosition() + glm::vec3(0, headHeight + (_raiseMirror * _myAvatar->getScale()), 0));
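Note on the paintGL() changes above: both the first-person branch and the mirror branch now target the raw getEyePosition() instead of getFilteredEyePosition(), and first person keeps tightness at 0.0f so the view tracks the head with no smoothing at all. The camera's own update loop is not part of this diff; the sketch below is only a generic follow-camera illustration (the struct, names, and semantics are assumptions, not hifi's Camera class) of why any per-frame blending toward the eye shows up as lag once the avatar moves quickly.

    #include <glm/glm.hpp>

    // Hypothetical follow camera, for illustration only.
    struct FollowCamera {
        glm::vec3 position = glm::vec3(0.0f);

        // tightness == 0: snap straight to the target, as the first-person branch now does;
        // tightness > 0: blend toward the target over time, which trails a fast-moving avatar.
        void update(const glm::vec3& target, float tightness, float deltaTime) {
            if (tightness <= 0.0f) {
                position = target;
            } else {
                float blend = glm::clamp(deltaTime * tightness, 0.0f, 1.0f);
                position += (target - position) * blend;
            }
        }
    };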


@@ -163,9 +163,6 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
}
_eyePosition = calculateAverageEyePosition();
- float velocityFilter = glm::clamp(1.0f - glm::length(_filteredEyePosition - _eyePosition), 0.0f, 1.0f);
- _filteredEyePosition = velocityFilter * _filteredEyePosition + (1.0f - velocityFilter) * _eyePosition;
}
void Head::relaxLean(float deltaTime) {
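The two lines deleted from Head::simulate() above are where the lag came from: _filteredEyePosition chased _eyePosition with a blend weight proportional to the current gap, so during steady flight it settled a constant distance behind the real eye, and the first-person camera (which targeted the filtered value) trailed the avatar. A minimal, self-contained simulation of that filter, assuming roughly 60 fps and a 6 m/s flight speed (illustrative numbers only):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
        const float framesPerSecond = 60.0f;                // assumed frame rate
        const float flySpeed = 6.0f;                         // assumed flight speed, m/s
        const float perFrame = flySpeed / framesPerSecond;   // eye moves ~0.1 m per frame

        float eye = 0.0f;       // true eye position along the flight direction (1-D)
        float filtered = 0.0f;  // stand-in for the removed _filteredEyePosition

        for (int frame = 0; frame < 600; ++frame) {
            eye += perFrame;
            // same update as the deleted Head::simulate() code, reduced to one dimension
            float velocityFilter = std::clamp(1.0f - std::fabs(filtered - eye), 0.0f, 1.0f);
            filtered = velocityFilter * filtered + (1.0f - velocityFilter) * eye;
        }
        // prints about 0.22 m: the filtered position trails the eye by a constant
        // offset for as long as the flight continues, which reads as camera lag
        printf("steady-state lag: %.2f m\n", eye - filtered);
        return 0;
    }

With the filter gone, paintGL() targets getEyePosition() directly and the first-person view stays locked to the head.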


@@ -88,7 +88,6 @@ public:
const bool getReturnToCenter() const { return _returnHeadToCenter; } // Do you want head to try to return to center (depends on interface detected)
float getAverageLoudness() const { return _averageLoudness; }
- glm::vec3 getFilteredEyePosition() const { return _filteredEyePosition; }
/// \return the point about which scaling occurs.
glm::vec3 getScalePivot() const;
@@ -121,7 +120,6 @@ private:
glm::vec3 _leftEyePosition;
glm::vec3 _rightEyePosition;
glm::vec3 _eyePosition;
- glm::vec3 _filteredEyePosition; // velocity filtered world space eye position
float _scale;
float _lastLoudness;


@@ -178,6 +178,31 @@ float rescaleCoef(float ddeCoef) {
return (ddeCoef - DDE_MIN_RANGE) / (DDE_MAX_RANGE - DDE_MIN_RANGE);
}
+ const int MIN = 0;
+ const int AVG = 1;
+ const int MAX = 2;
+ const float LONG_TERM_AVERAGE = 0.999f;
+ void resetCoefficient(float * coefficient, float currentValue) {
+ coefficient[MIN] = coefficient[MAX] = coefficient[AVG] = currentValue;
+ }
+ float updateAndGetCoefficient(float * coefficient, float currentValue, bool scaleToRange = false) {
+ coefficient[MIN] = (currentValue < coefficient[MIN]) ? currentValue : coefficient[MIN];
+ coefficient[MAX] = (currentValue > coefficient[MAX]) ? currentValue : coefficient[MAX];
+ coefficient[AVG] = LONG_TERM_AVERAGE * coefficient[AVG] + (1.f - LONG_TERM_AVERAGE) * currentValue;
+ if (coefficient[MAX] > coefficient[MIN]) {
+ if (scaleToRange) {
+ return glm::clamp((currentValue - coefficient[AVG]) / (coefficient[MAX] - coefficient[MIN]), 0.f, 1.f);
+ } else {
+ return glm::clamp(currentValue - coefficient[AVG], 0.f, 1.f);
+ }
+ } else {
+ return 0.f;
+ }
+ }
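The helpers added above keep a running minimum, long-term average (0.999 decay per packet), and maximum for each eye coefficient, and return how far the current raw value sits above that average, clamped to [0, 1] (or rescaled by the observed min-max range when scaleToRange is set). A self-contained usage sketch, with std::clamp standing in for glm::clamp and made-up packet values:

    #include <algorithm>
    #include <cstdio>

    const int MIN = 0;
    const int AVG = 1;
    const int MAX = 2;
    const float LONG_TERM_AVERAGE = 0.999f;

    void resetCoefficient(float* coefficient, float currentValue) {
        coefficient[MIN] = coefficient[MAX] = coefficient[AVG] = currentValue;
    }

    // mirrors the helper added in this diff, with std::clamp in place of glm::clamp
    float updateAndGetCoefficient(float* coefficient, float currentValue, bool scaleToRange = false) {
        coefficient[MIN] = std::min(currentValue, coefficient[MIN]);
        coefficient[MAX] = std::max(currentValue, coefficient[MAX]);
        coefficient[AVG] = LONG_TERM_AVERAGE * coefficient[AVG] + (1.0f - LONG_TERM_AVERAGE) * currentValue;
        if (coefficient[MAX] > coefficient[MIN]) {
            if (scaleToRange) {
                return std::clamp((currentValue - coefficient[AVG]) / (coefficient[MAX] - coefficient[MIN]), 0.0f, 1.0f);
            }
            return std::clamp(currentValue - coefficient[AVG], 0.0f, 1.0f);
        }
        return 0.0f;
    }

    int main() {
        float rightEye[3];                  // indexed by MIN, AVG, MAX
        resetCoefficient(rightEye, 0.3f);   // seeded from the first packet's raw value
        for (int i = 0; i < 100; ++i) {
            // while the value sits at its resting level no range has been observed,
            // so the (MAX > MIN) guard keeps the coefficient at exactly 0
            updateAndGetCoefficient(rightEye, 0.3f);
        }
        // a momentary spike above the long-term average registers as a blink
        float blink = updateAndGetCoefficient(rightEye, 0.9f);
        printf("blink coefficient: %.2f\n", blink);  // ~0.60, later scaled by EYE_MAGNIFIER
        return 0;
    }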
void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
if(buffer.size() > MIN_PACKET_SIZE) {
Packet packet;
@@ -189,14 +214,17 @@ void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
memcpy(&translation, packet.translation, sizeof(packet.translation));
glm::quat rotation;
memcpy(&rotation, &packet.rotation, sizeof(packet.rotation));
- if (_reset) {
+ if (_reset || (_lastReceiveTimestamp == 0)) {
memcpy(&_referenceTranslation, &translation, sizeof(glm::vec3));
memcpy(&_referenceRotation, &rotation, sizeof(glm::quat));
+ resetCoefficient(_rightEye, packet.expressions[0]);
+ resetCoefficient(_leftEye, packet.expressions[1]);
_reset = false;
}
// Compute relative translation
- float LEAN_DAMPING_FACTOR = 40;
+ float LEAN_DAMPING_FACTOR = 200.0f;
translation -= _referenceTranslation;
translation /= LEAN_DAMPING_FACTOR;
translation.x *= -1;
@@ -208,10 +236,19 @@ void DdeFaceTracker::decodePacket(const QByteArray& buffer) {
_headTranslation = translation;
_headRotation = rotation;
if (_lastReceiveTimestamp == 0) {
// On first packet, reset coefficients
}
// Set blendshapes
float BLINK_MAGNIFIER = 2.0f;
_blendshapeCoefficients[_leftBlinkIndex] = rescaleCoef(packet.expressions[1]) * BLINK_MAGNIFIER;
_blendshapeCoefficients[_rightBlinkIndex] = rescaleCoef(packet.expressions[0]) * BLINK_MAGNIFIER;
+ float EYE_MAGNIFIER = 4.0f;
+ float rightEye = (updateAndGetCoefficient(_rightEye, packet.expressions[0])) * EYE_MAGNIFIER;
+ _blendshapeCoefficients[_rightBlinkIndex] = rightEye;
+ float leftEye = (updateAndGetCoefficient(_leftEye, packet.expressions[1])) * EYE_MAGNIFIER;
+ _blendshapeCoefficients[_leftBlinkIndex] = leftEye;
// Right eye = packet.expressions[0];
float leftBrow = 1.0f - rescaleCoef(packet.expressions[14]);
if (leftBrow < 0.5f) {
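Summarizing the DdeFaceTracker::decodePacket() changes above: the reference head pose and the new per-eye baselines are seeded when the very first packet arrives (_lastReceiveTimestamp == 0) as well as on reset, the head-lean translation is damped five times more strongly (divided by 200 instead of 40), and the blink blendshapes end up driven by the baseline-relative eye coefficients scaled by EYE_MAGNIFIER (4.0) rather than the fixed-range rescaleCoef() values scaled by BLINK_MAGNIFIER (2.0). The numbers below are illustrative only (the DDE range constants are not shown in this diff):

    #include <algorithm>
    #include <cstdio>

    int main() {
        // head lean: the same 2 cm of tracked translation now moves the avatar 5x less
        float tracked = 0.02f;                                 // meters, example value
        printf("lean old: %.5f  new: %.5f\n", tracked / 40.0f, tracked / 200.0f);

        // blink: only the excess over the wearer's own long-term average counts
        float raw = 0.45f;                                     // example raw eye-closure value
        float longTermAverage = 0.40f;                         // example running average
        float EYE_MAGNIFIER = 4.0f;
        float blink = std::clamp(raw - longTermAverage, 0.0f, 1.0f) * EYE_MAGNIFIER;
        printf("blink blendshape: %.2f\n", blink);             // 0.05 * 4 = 0.20
        return 0;
    }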


@@ -84,6 +84,10 @@ private:
int _mouthSmileRightIndex;
int _jawOpenIndex;
+ float _leftEye[3];
+ float _rightEye[3];
};
#endif // hifi_DdeFaceTracker_h