From 99bd0f3d1341ce4bdee1049912d44b567796d92c Mon Sep 17 00:00:00 2001 From: "Anthony J. Thibault" Date: Mon, 29 Feb 2016 16:49:35 -0800 Subject: [PATCH 1/5] MyAvatar: fixes for eye look at Users in desktop mode should now see the eyes change focus between the left eye, right eye and the mouth. Users in mirror mode, or third person camera, should more accurately determine which avatar to look at. --- interface/src/avatar/MyAvatar.cpp | 40 ++++++++++++------------------- 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index fc715eebe9..1b04fa4fa4 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -849,7 +849,7 @@ void MyAvatar::updateLookAtTargetAvatar() { avatar->setIsLookAtTarget(false); if (!avatar->isMyAvatar() && avatar->isInitialized() && (distanceTo < GREATEST_LOOKING_AT_DISTANCE * getUniformScale())) { - float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition)); + float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition())); if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) { _lookAtTargetAvatar = avatarPointer; _targetAvatarPosition = avatarPointer->getPosition(); @@ -864,36 +864,26 @@ void MyAvatar::updateLookAtTargetAvatar() { // Let's get everything to world space: glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition(); glm::vec3 avatarRightEye = getHead()->getRightEyePosition(); - // When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok. - // By my inpsection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.) - // This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space. 
- glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left); - glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right); - glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]); - glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]); - auto humanSystem = qApp->getViewFrustum(); - glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal); - glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal); // First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point. // (We will be adding that offset to the camera position, after making some other adjustments.) glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition(); - // Scale by proportional differences between avatar and human. - float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye); - float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye); - gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation; + // scale gazeOffset by IPD, if wearing an HMD. + if (qApp->isHMDMode()) { + glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left); + glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right); + glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]); + glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]); + auto humanSystem = qApp->getViewFrustum(); + glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal); + glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal); - // If the camera is also not oriented with the head, adjust by getting the offset in head-space... - /* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday. 
- glm::quat avatarHeadOrientation = getHead()->getOrientation(); - glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset; - // ... and treat that as though it were in camera space, bringing it back to world space. - // But camera is fudged to make the picture feel like the avatar's orientation. - glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrienation() ? - gazeOffset = humanOrientation * gazeOffsetLocalToHead; - glm::vec3 corrected = humanSystem->getPosition() + gazeOffset; - */ + // Scale by proportional differences between avatar and human. + float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye); + float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye); + gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation; + } // And now we can finally add that offset to the camera. glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset; From 460582239a804a2a4dc069e30e7ebf5a50217537 Mon Sep 17 00:00:00 2001 From: "Anthony J. Thibault" Date: Mon, 29 Feb 2016 18:02:50 -0800 Subject: [PATCH 2/5] Avatar eye look at fixes for HMDs There were three things that were causing issues with eye look at vectors while wearing an HMD. 1) The matrix returned by AvatarUpdate->getHeadPose() was in the wrong space, it should be in avatar space. it was actually returning a matrix in sensor/room space. 2) The lookAtPosition was incorrect while wearing an HMD and with no avatars to look at. 3) The eye rotation limits in Rig::updateEyeJoint were relative to the model's zero orientation, NOT relative to the head. this was causing the eyes to hit limits when the avatar head turned. 
--- interface/src/Application.cpp | 4 ++-- interface/src/avatar/AvatarUpdate.cpp | 9 ++++++++- libraries/animation/src/Rig.cpp | 13 ++++++++++--- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index 21377fa945..586e7ee8e5 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -2946,9 +2946,9 @@ void Application::updateMyAvatarLookAtPosition() { } else { // I am not looking at anyone else, so just look forward if (isHMD) { - glm::mat4 headPose = _avatarUpdate->getHeadPose() ; + glm::mat4 headPose = _avatarUpdate->getHeadPose(); glm::quat headRotation = glm::quat_cast(headPose); - lookAtSpot = _myCamera.getPosition() + + lookAtSpot = myAvatar->getPosition() + myAvatar->getOrientation() * (headRotation * glm::vec3(0.0f, 0.0f, -TREE_SCALE)); } else { lookAtSpot = myAvatar->getHead()->getEyePosition() + diff --git a/interface/src/avatar/AvatarUpdate.cpp b/interface/src/avatar/AvatarUpdate.cpp index 68a13ba227..4881e3eaec 100644 --- a/interface/src/avatar/AvatarUpdate.cpp +++ b/interface/src/avatar/AvatarUpdate.cpp @@ -31,7 +31,14 @@ void AvatarUpdate::synchronousProcess() { // Keep our own updated value, so that our asynchronous code can consult it. _isHMDMode = qApp->isHMDMode(); auto frameCount = qApp->getFrameCount(); - _headPose = qApp->getActiveDisplayPlugin()->getHeadPose(frameCount); + + QSharedPointer manager = DependencyManager::get(); + MyAvatar* myAvatar = manager->getMyAvatar(); + assert(myAvatar); + + // transform the head pose from the displayPlugin into avatar coordinates. 
+ glm::mat4 invAvatarMat = glm::inverse(createMatFromQuatAndPos(myAvatar->getOrientation(), myAvatar->getPosition())); + _headPose = invAvatarMat * (myAvatar->getSensorToWorldMatrix() * qApp->getActiveDisplayPlugin()->getHeadPose(frameCount)); if (!isThreaded()) { process(); diff --git a/libraries/animation/src/Rig.cpp b/libraries/animation/src/Rig.cpp index 3952dc5b40..7bab4dfc1d 100644 --- a/libraries/animation/src/Rig.cpp +++ b/libraries/animation/src/Rig.cpp @@ -1067,14 +1067,21 @@ void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm glm::mat4 rigToWorld = createMatFromQuatAndPos(modelRotation, modelTranslation); glm::mat4 worldToRig = glm::inverse(rigToWorld); glm::vec3 zAxis = glm::normalize(_internalPoseSet._absolutePoses[index].trans - transformPoint(worldToRig, lookAtSpot)); - glm::quat q = rotationBetween(IDENTITY_FRONT, zAxis); + + glm::quat desiredQuat = rotationBetween(IDENTITY_FRONT, zAxis); + glm::quat headQuat; + int headIndex = _animSkeleton->nameToJointIndex("Head"); + if (headIndex >= 0) { + headQuat = _internalPoseSet._absolutePoses[headIndex].rot; + } + glm::quat deltaQuat = desiredQuat * glm::inverse(headQuat); // limit rotation const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE; - q = glm::angleAxis(glm::clamp(glm::angle(q), -MAX_ANGLE, MAX_ANGLE), glm::axis(q)); + deltaQuat = glm::angleAxis(glm::clamp(glm::angle(deltaQuat), -MAX_ANGLE, MAX_ANGLE), glm::axis(deltaQuat)); // directly set absolutePose rotation - _internalPoseSet._absolutePoses[index].rot = q; + _internalPoseSet._absolutePoses[index].rot = deltaQuat * headQuat; } } From 3cde9721743da428f4dfc25aaeb34f1dde3d9e5e Mon Sep 17 00:00:00 2001 From: "Anthony J. Thibault" Date: Tue, 1 Mar 2016 11:07:22 -0800 Subject: [PATCH 3/5] Rig: issue warnings for missing joints Also, removed Rig::computeEyesInRootFrame, it was causing warnings because it was looking up Eye and Head joints for all models, not just avatars. 
--- interface/src/avatar/MyAvatar.cpp | 20 ----------- libraries/animation/src/Rig.cpp | 60 ++++++++++++++----------------- libraries/animation/src/Rig.h | 4 +-- 3 files changed, 29 insertions(+), 55 deletions(-) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 1b04fa4fa4..db4b900a3c 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -1766,25 +1766,6 @@ glm::quat MyAvatar::getWorldBodyOrientation() const { return glm::quat_cast(_sensorToWorldMatrix * _bodySensorMatrix); } -#if 0 -// derive avatar body position and orientation from the current HMD Sensor location. -// results are in sensor space -glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const { - if (_rig) { - // orientation - const glm::quat hmdOrientation = getHMDSensorOrientation(); - const glm::quat yaw = cancelOutRollAndPitch(hmdOrientation); - // position - // we flip about yAxis when going from "root" to "avatar" frame - // and we must also apply "yaw" to get into HMD frame - glm::quat rotY180 = glm::angleAxis((float)M_PI, glm::vec3(0.0f, 1.0f, 0.0f)); - glm::vec3 eyesInAvatarFrame = rotY180 * yaw * _rig->getEyesInRootFrame(); - glm::vec3 bodyPos = getHMDSensorPosition() - eyesInAvatarFrame; - return createMatFromQuatAndPos(yaw, bodyPos); - } - return glm::mat4(); -} -#else // old school meat hook style glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const { @@ -1825,7 +1806,6 @@ glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const { return createMatFromQuatAndPos(hmdOrientationYawOnly, bodyPos); } -#endif glm::vec3 MyAvatar::getPositionForAudio() { switch (_audioListenerMode) { diff --git a/libraries/animation/src/Rig.cpp b/libraries/animation/src/Rig.cpp index 7bab4dfc1d..02968e3665 100644 --- a/libraries/animation/src/Rig.cpp +++ b/libraries/animation/src/Rig.cpp @@ -173,8 +173,6 @@ void Rig::initJointStates(const FBXGeometry& geometry, const glm::mat4& modelOff _animSkeleton = std::make_shared(geometry); - 
computeEyesInRootFrame(_animSkeleton->getRelativeDefaultPoses()); - _internalPoseSet._relativePoses.clear(); _internalPoseSet._relativePoses = _animSkeleton->getRelativeDefaultPoses(); @@ -201,8 +199,6 @@ void Rig::reset(const FBXGeometry& geometry) { _geometryOffset = AnimPose(geometry.offset); _animSkeleton = std::make_shared(geometry); - computeEyesInRootFrame(_animSkeleton->getRelativeDefaultPoses()); - _internalPoseSet._relativePoses.clear(); _internalPoseSet._relativePoses = _animSkeleton->getRelativeDefaultPoses(); @@ -237,10 +233,20 @@ int Rig::getJointStateCount() const { return (int)_internalPoseSet._relativePoses.size(); } +static const uint32_t MAX_JOINT_NAME_WARNING_COUNT = 100; + int Rig::indexOfJoint(const QString& jointName) const { if (_animSkeleton) { - return _animSkeleton->nameToJointIndex(jointName); + int result = _animSkeleton->nameToJointIndex(jointName); + + // This is a content error, so we should issue a warning. + if (result < 0 && _jointNameWarningCount < MAX_JOINT_NAME_WARNING_COUNT) { + qCWarning(animation) << "Rig: Missing joint" << jointName << "in avatar model"; + _jointNameWarningCount++; + } + return result; } else { + // This is normal and can happen when the avatar model has not been downloaded/loaded yet. 
return -1; } } @@ -444,26 +450,6 @@ void Rig::calcAnimAlpha(float speed, const std::vector& referenceSpeeds, *alphaOut = alpha; } -void Rig::computeEyesInRootFrame(const AnimPoseVec& poses) { - // TODO: use cached eye/hips indices for these calculations - int numPoses = (int)poses.size(); - int hipsIndex = _animSkeleton->nameToJointIndex(QString("Hips")); - int headIndex = _animSkeleton->nameToJointIndex(QString("Head")); - if (hipsIndex > 0 && headIndex > 0) { - int rightEyeIndex = _animSkeleton->nameToJointIndex(QString("RightEye")); - int leftEyeIndex = _animSkeleton->nameToJointIndex(QString("LeftEye")); - if (numPoses > rightEyeIndex && numPoses > leftEyeIndex && rightEyeIndex > 0 && leftEyeIndex > 0) { - glm::vec3 rightEye = _animSkeleton->getAbsolutePose(rightEyeIndex, poses).trans; - glm::vec3 leftEye = _animSkeleton->getAbsolutePose(leftEyeIndex, poses).trans; - glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans; - _eyesInRootFrame = 0.5f * (rightEye + leftEye) - hips; - } else { - glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans; - _eyesInRootFrame = 0.5f * (DEFAULT_RIGHT_EYE_POS + DEFAULT_LEFT_EYE_POS) - hips; - } - } -} - void Rig::setEnableInverseKinematics(bool enable) { _enableInverseKinematics = enable; } @@ -893,8 +879,6 @@ void Rig::updateAnimations(float deltaTime, glm::mat4 rootTransform) { for (auto& trigger : triggersOut) { _animVars.setTrigger(trigger); } - - computeEyesInRootFrame(_internalPoseSet._relativePoses); } applyOverridePoses(); @@ -1070,7 +1054,7 @@ void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm glm::quat desiredQuat = rotationBetween(IDENTITY_FRONT, zAxis); glm::quat headQuat; - int headIndex = _animSkeleton->nameToJointIndex("Head"); + int headIndex = indexOfJoint("Head"); if (headIndex >= 0) { headQuat = _internalPoseSet._absolutePoses[headIndex].rot; } @@ -1093,7 +1077,11 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) { 
const float MIN_LENGTH = 1.0e-4f; // project the hips onto the xz plane. - auto hipsTrans = _internalPoseSet._absolutePoses[_animSkeleton->nameToJointIndex("Hips")].trans; + int hipsIndex = indexOfJoint("Hips"); + glm::vec3 hipsTrans; + if (hipsIndex >= 0) { + hipsTrans = _internalPoseSet._absolutePoses[hipsIndex].trans; + } const glm::vec2 bodyCircleCenter(hipsTrans.x, hipsTrans.z); if (params.isLeftEnabled) { @@ -1278,7 +1266,11 @@ void Rig::computeAvatarBoundingCapsule( AnimPose geometryToRig = _modelOffset * _geometryOffset; - AnimPose hips = geometryToRig * _animSkeleton->getAbsoluteBindPose(_animSkeleton->nameToJointIndex("Hips")); + AnimPose hips(glm::vec3(1), glm::quat(), glm::vec3()); + int hipsIndex = indexOfJoint("Hips"); + if (hipsIndex >= 0) { + hips = geometryToRig * _animSkeleton->getAbsoluteBindPose(hipsIndex); + } AnimVariantMap animVars; glm::quat handRotation = glm::angleAxis(PI, Vectors::UNIT_X); animVars.set("leftHandPosition", hips.trans); @@ -1288,8 +1280,8 @@ void Rig::computeAvatarBoundingCapsule( animVars.set("rightHandRotation", handRotation); animVars.set("rightHandType", (int)IKTarget::Type::RotationAndPosition); - int rightFootIndex = _animSkeleton->nameToJointIndex("RightFoot"); - int leftFootIndex = _animSkeleton->nameToJointIndex("LeftFoot"); + int rightFootIndex = indexOfJoint("RightFoot"); + int leftFootIndex = indexOfJoint("LeftFoot"); if (rightFootIndex != -1 && leftFootIndex != -1) { glm::vec3 foot = Vectors::ZERO; glm::quat footRotation = glm::angleAxis(0.5f * PI, Vectors::UNIT_X); @@ -1321,7 +1313,7 @@ void Rig::computeAvatarBoundingCapsule( // HACK to reduce the radius of the bounding capsule to be tight with the torso, we only consider joints // from the head to the hips when computing the rest of the bounding capsule. 
- int index = _animSkeleton->nameToJointIndex(QString("Head")); + int index = indexOfJoint("Head"); while (index != -1) { const FBXJointShapeInfo& shapeInfo = geometry.joints.at(index).shapeInfo; AnimPose pose = finalPoses[index]; @@ -1344,3 +1336,5 @@ void Rig::computeAvatarBoundingCapsule( glm::vec3 rigCenter = (geometryToRig * (0.5f * (totalExtents.maximum + totalExtents.minimum))); localOffsetOut = rigCenter - (geometryToRig * rootPosition); } + + diff --git a/libraries/animation/src/Rig.h b/libraries/animation/src/Rig.h index 9c5b014d55..3d5d44b844 100644 --- a/libraries/animation/src/Rig.h +++ b/libraries/animation/src/Rig.h @@ -231,8 +231,6 @@ public: void updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade); void calcAnimAlpha(float speed, const std::vector& referenceSpeeds, float* alphaOut) const; - void computeEyesInRootFrame(const AnimPoseVec& poses); - AnimPose _modelOffset; // model to rig space AnimPose _geometryOffset; // geometry to model space (includes unit offset & fst offsets) @@ -305,6 +303,8 @@ public: bool _lastEnableInverseKinematics { true }; bool _enableInverseKinematics { true }; + mutable uint32_t _jointNameWarningCount { 0 }; + private: QMap _stateHandlers; int _nextStateHandlerId { 0 }; From babf48ac5756131734cf6c2bcc0cae4a2a660f00 Mon Sep 17 00:00:00 2001 From: "Anthony J. 
Thibault" Date: Tue, 1 Mar 2016 11:17:03 -0800 Subject: [PATCH 4/5] MyAvatar: take IPD scale into account when computing corrected eye lookAt --- interface/src/avatar/MyAvatar.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index db4b900a3c..2f6f99ba90 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -879,8 +879,11 @@ void MyAvatar::updateLookAtTargetAvatar() { glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal); glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal); + auto hmdInterface = DependencyManager::get(); + float ipdScale = hmdInterface->getIPDScale(); + // Scale by proportional differences between avatar and human. - float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye); + float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye) * ipdScale; float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye); gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation; } From 506e46faeeaa3f7d16dc9f6eaffe4c5c116b3a85 Mon Sep 17 00:00:00 2001 From: "Anthony J. Thibault" Date: Tue, 1 Mar 2016 11:21:14 -0800 Subject: [PATCH 5/5] MyAvatar: fix for missing include --- interface/src/avatar/MyAvatar.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 2f6f99ba90..46bc128605 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -19,6 +19,7 @@ #include +#include #include #include #include