Merge pull request #7227 from hyperlogic/tony/eye-look-at-fixes

Avatar eye look at fixes for HMD and desktop
Chris Collins 2016-03-07 11:50:16 -08:00
commit 7850ca3576
5 changed files with 67 additions and 85 deletions

interface/src/Application.cpp View file

@@ -2944,9 +2944,9 @@ void Application::updateMyAvatarLookAtPosition() {
} else {
// I am not looking at anyone else, so just look forward
if (isHMD) {
glm::mat4 headPose = _avatarUpdate->getHeadPose() ;
glm::mat4 headPose = _avatarUpdate->getHeadPose();
glm::quat headRotation = glm::quat_cast(headPose);
lookAtSpot = _myCamera.getPosition() +
lookAtSpot = myAvatar->getPosition() +
myAvatar->getOrientation() * (headRotation * glm::vec3(0.0f, 0.0f, -TREE_SCALE));
} else {
lookAtSpot = myAvatar->getHead()->getEyePosition() +

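The fix above is a frame-consistency change: getHeadPose() now answers in avatar coordinates (see AvatarUpdate.cpp below), so the gaze ray has to be anchored at the avatar's position rather than the camera's. A standalone sketch of the corrected computation (not code from this PR; FORWARD_DISTANCE stands in for the engine's TREE_SCALE constant):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

glm::vec3 lookForwardSpot(const glm::vec3& avatarPos, const glm::quat& avatarRot,
                          const glm::mat4& headPose) {
    const float FORWARD_DISTANCE = 16384.0f;      // illustrative stand-in for TREE_SCALE
    glm::quat headRot = glm::quat_cast(headPose); // head rotation, avatar space
    glm::vec3 forward = headRot * glm::vec3(0.0f, 0.0f, -FORWARD_DISTANCE);
    return avatarPos + avatarRot * forward;       // avatar space -> world space
}

Both terms of the sum are now in the same chain of frames; mixing _myCamera.getPosition() with an avatar-space head rotation appears to be the mismatch this hunk addresses.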
interface/src/avatar/AvatarUpdate.cpp View file

@@ -31,7 +31,14 @@ void AvatarUpdate::synchronousProcess() {
// Keep our own updated value, so that our asynchronous code can consult it.
_isHMDMode = qApp->isHMDMode();
auto frameCount = qApp->getFrameCount();
_headPose = qApp->getActiveDisplayPlugin()->getHeadPose(frameCount);
QSharedPointer<AvatarManager> manager = DependencyManager::get<AvatarManager>();
MyAvatar* myAvatar = manager->getMyAvatar();
assert(myAvatar);
// transform the head pose from the displayPlugin into avatar coordinates.
glm::mat4 invAvatarMat = glm::inverse(createMatFromQuatAndPos(myAvatar->getOrientation(), myAvatar->getPosition()));
_headPose = invAvatarMat * (myAvatar->getSensorToWorldMatrix() * qApp->getActiveDisplayPlugin()->getHeadPose(frameCount));
if (!isThreaded()) {
process();

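This is the central transform of the PR. A minimal sketch of the same composition, standalone (parameter names are illustrative; the inlined matrix build mirrors what createMatFromQuatAndPos() produces):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

glm::mat4 headPoseInAvatarFrame(const glm::mat4& headPoseInSensorFrame,
                                const glm::mat4& sensorToWorldMat,
                                const glm::quat& avatarRot,
                                const glm::vec3& avatarPos) {
    glm::mat4 avatarMat = glm::mat4_cast(avatarRot); // rotation in the upper 3x3...
    avatarMat[3] = glm::vec4(avatarPos, 1.0f);       // ...translation in the last column
    glm::mat4 headPoseInWorldFrame = sensorToWorldMat * headPoseInSensorFrame; // sensor -> world
    return glm::inverse(avatarMat) * headPoseInWorldFrame;                     // world -> avatar
}

Reading right to left: sensor space to world space via the avatar's sensor-to-world matrix, then world space to avatar space via the inverse of the avatar's own transform.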
interface/src/avatar/MyAvatar.cpp View file

@@ -19,6 +19,7 @@
#include <QtCore/QTimer>
#include <scripting/HMDScriptingInterface.h>
#include <AccountManager.h>
#include <AddressManager.h>
#include <AudioClient.h>
@@ -849,7 +850,7 @@ void MyAvatar::updateLookAtTargetAvatar() {
avatar->setIsLookAtTarget(false);
if (!avatar->isMyAvatar() && avatar->isInitialized() &&
(distanceTo < GREATEST_LOOKING_AT_DISTANCE * getUniformScale())) {
float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition()));
if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) {
_lookAtTargetAvatar = avatarPointer;
_targetAvatarPosition = avatarPointer->getPosition();
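The retargeted test measures the candidate angle from my avatar's eyes instead of from the camera, which keeps target selection stable when the desktop camera sits away from the avatar's head. Sketched standalone (illustrative names, glm only):

#include <glm/glm.hpp>
#include <cmath>

float angleToTarget(const glm::vec3& lookForward,       // normalized gaze direction
                    const glm::vec3& targetEyePosition, // their eyes, world space
                    const glm::vec3& myEyePosition) {   // my eyes, world space
    glm::vec3 toTarget = glm::normalize(targetEyePosition - myEyePosition);
    float cosAngle = glm::clamp(glm::dot(lookForward, toTarget), -1.0f, 1.0f);
    return acosf(cosAngle); // radians
}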
@@ -864,36 +865,29 @@ void MyAvatar::updateLookAtTargetAvatar() {
// Let's get everything to world space:
glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition();
glm::vec3 avatarRightEye = getHead()->getRightEyePosition();
// When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok.
// By my inspection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.)
// This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space.
glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
auto humanSystem = qApp->getViewFrustum();
glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
// First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point.
// (We will be adding that offset to the camera position, after making some other adjustments.)
glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition();
// Scale by proportional differences between avatar and human.
float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye);
float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
// scale gazeOffset by IPD, if wearing an HMD.
if (qApp->isHMDMode()) {
glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
auto humanSystem = qApp->getViewFrustum();
glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
// If the camera is also not oriented with the head, adjust by getting the offset in head-space...
/* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday.
glm::quat avatarHeadOrientation = getHead()->getOrientation();
glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset;
// ... and treat that as though it were in camera space, bringing it back to world space.
// But camera is fudged to make the picture feel like the avatar's orientation.
glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrientation()?
gazeOffset = humanOrientation * gazeOffsetLocalToHead;
glm::vec3 corrected = humanSystem->getPosition() + gazeOffset;
*/
auto hmdInterface = DependencyManager::get<HMDScriptingInterface>();
float ipdScale = hmdInterface->getIPDScale();
// Scale by proportional differences between avatar and human.
float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye) * ipdScale;
float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
}
// And now we can finally add that offset to the camera.
glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset;
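The restructured block computes the human eye positions and the separation ratio only when an HMD is actually worn, and folds the user's IPD scale into the human side of the ratio. The scaling step in isolation (a standalone sketch; it assumes the avatar model has two distinct eye joints, since a zero avatarEyeSeparation would divide by zero):

#include <glm/glm.hpp>

glm::vec3 scaleGazeOffsetByIPD(const glm::vec3& gazeOffset,
                               const glm::vec3& humanLeftEye, const glm::vec3& humanRightEye,
                               const glm::vec3& avatarLeftEye, const glm::vec3& avatarRightEye,
                               float ipdScale) {
    // A person with wider-set eyes than their avatar needs a proportionally larger
    // offset for the avatar's gaze to read as converging on the same point.
    float humanEyeSeparation = glm::length(humanLeftEye - humanRightEye) * ipdScale;
    float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
    return gazeOffset * (humanEyeSeparation / avatarEyeSeparation);
}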
@@ -1777,25 +1771,6 @@ glm::quat MyAvatar::getWorldBodyOrientation() const {
return glm::quat_cast(_sensorToWorldMatrix * _bodySensorMatrix);
}
#if 0
// derive avatar body position and orientation from the current HMD Sensor location.
// results are in sensor space
glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const {
if (_rig) {
// orientation
const glm::quat hmdOrientation = getHMDSensorOrientation();
const glm::quat yaw = cancelOutRollAndPitch(hmdOrientation);
// position
// we flip about yAxis when going from "root" to "avatar" frame
// and we must also apply "yaw" to get into HMD frame
glm::quat rotY180 = glm::angleAxis((float)M_PI, glm::vec3(0.0f, 1.0f, 0.0f));
glm::vec3 eyesInAvatarFrame = rotY180 * yaw * _rig->getEyesInRootFrame();
glm::vec3 bodyPos = getHMDSensorPosition() - eyesInAvatarFrame;
return createMatFromQuatAndPos(yaw, bodyPos);
}
return glm::mat4();
}
#else
// old school meat hook style
glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const {
@@ -1836,7 +1811,6 @@ glm::mat4 MyAvatar::deriveBodyFromHMDSensor() const {
return createMatFromQuatAndPos(hmdOrientationYawOnly, bodyPos);
}
#endif
glm::vec3 MyAvatar::getPositionForAudio() {
switch (_audioListenerMode) {

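With the eyes-based derivation disabled under #if 0, the branch between #else and #endif (the "old school meat hook" path) is the one that runs, and its key step is reducing the full HMD orientation to a yaw-only rotation. One common way to compute that reduction, sketched standalone (not necessarily how the engine's cancelOutRollAndPitch() does it):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
#include <cmath>

glm::quat yawOnly(const glm::quat& hmdOrientation) {
    // Rotate the forward axis, flatten it onto the XZ plane, and rebuild a
    // rotation about +Y from whatever heading survives.
    glm::vec3 forward = hmdOrientation * glm::vec3(0.0f, 0.0f, -1.0f);
    forward.y = 0.0f;
    if (glm::dot(forward, forward) < 1.0e-6f) {
        return glm::quat(1.0f, 0.0f, 0.0f, 0.0f); // straight up/down: no usable heading
    }
    forward = glm::normalize(forward);
    float yaw = atan2f(-forward.x, -forward.z);
    return glm::angleAxis(yaw, glm::vec3(0.0f, 1.0f, 0.0f));
}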
libraries/animation/src/Rig.cpp View file

@@ -173,8 +173,6 @@ void Rig::initJointStates(const FBXGeometry& geometry, const glm::mat4& modelOff
_animSkeleton = std::make_shared<AnimSkeleton>(geometry);
computeEyesInRootFrame(_animSkeleton->getRelativeDefaultPoses());
_internalPoseSet._relativePoses.clear();
_internalPoseSet._relativePoses = _animSkeleton->getRelativeDefaultPoses();
@@ -201,8 +199,6 @@ void Rig::reset(const FBXGeometry& geometry) {
_geometryOffset = AnimPose(geometry.offset);
_animSkeleton = std::make_shared<AnimSkeleton>(geometry);
computeEyesInRootFrame(_animSkeleton->getRelativeDefaultPoses());
_internalPoseSet._relativePoses.clear();
_internalPoseSet._relativePoses = _animSkeleton->getRelativeDefaultPoses();
@@ -237,10 +233,20 @@ int Rig::getJointStateCount() const {
return (int)_internalPoseSet._relativePoses.size();
}
static const uint32_t MAX_JOINT_NAME_WARNING_COUNT = 100;
int Rig::indexOfJoint(const QString& jointName) const {
if (_animSkeleton) {
return _animSkeleton->nameToJointIndex(jointName);
int result = _animSkeleton->nameToJointIndex(jointName);
// This is a content error, so we should issue a warning.
if (result < 0 && _jointNameWarningCount < MAX_JOINT_NAME_WARNING_COUNT) {
qCWarning(animation) << "Rig: Missing joint" << jointName << "in avatar model";
_jointNameWarningCount++;
}
return result;
} else {
// This is normal and can happen when the avatar model has not been downloaded/loaded yet.
return -1;
}
}
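indexOfJoint() is called from per-frame update paths (the Head and Hips lookups later in this file), so the warning is capped rather than unconditional; uncapped, a missing joint would log once per frame. The counter is mutable in Rig.h (below) because the lookup itself is const. A generic sketch of the capped-warning pattern (illustrative, Qt-style logging; not the Rig internals):

#include <QDebug>
#include <QHash>
#include <QString>
#include <cstdint>

static const uint32_t MAX_WARNINGS = 100;

int lookupJoint(const QHash<QString, int>& nameToIndex,
                const QString& jointName, uint32_t& warningCount) {
    auto it = nameToIndex.find(jointName);
    if (it != nameToIndex.end()) {
        return it.value();
    }
    if (warningCount < MAX_WARNINGS) { // stop once the log has made its point
        qWarning() << "Missing joint" << jointName;
        ++warningCount;
    }
    return -1; // sentinel checked by every call site in this PR
}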
@@ -444,26 +450,6 @@ void Rig::calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds,
*alphaOut = alpha;
}
void Rig::computeEyesInRootFrame(const AnimPoseVec& poses) {
// TODO: use cached eye/hips indices for these calculations
int numPoses = (int)poses.size();
int hipsIndex = _animSkeleton->nameToJointIndex(QString("Hips"));
int headIndex = _animSkeleton->nameToJointIndex(QString("Head"));
if (hipsIndex > 0 && headIndex > 0) {
int rightEyeIndex = _animSkeleton->nameToJointIndex(QString("RightEye"));
int leftEyeIndex = _animSkeleton->nameToJointIndex(QString("LeftEye"));
if (numPoses > rightEyeIndex && numPoses > leftEyeIndex && rightEyeIndex > 0 && leftEyeIndex > 0) {
glm::vec3 rightEye = _animSkeleton->getAbsolutePose(rightEyeIndex, poses).trans;
glm::vec3 leftEye = _animSkeleton->getAbsolutePose(leftEyeIndex, poses).trans;
glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans;
_eyesInRootFrame = 0.5f * (rightEye + leftEye) - hips;
} else {
glm::vec3 hips = _animSkeleton->getAbsolutePose(hipsIndex, poses).trans;
_eyesInRootFrame = 0.5f * (DEFAULT_RIGHT_EYE_POS + DEFAULT_LEFT_EYE_POS) - hips;
}
}
}
void Rig::setEnableInverseKinematics(bool enable) {
_enableInverseKinematics = enable;
}
@@ -893,8 +879,6 @@ void Rig::updateAnimations(float deltaTime, glm::mat4 rootTransform) {
for (auto& trigger : triggersOut) {
_animVars.setTrigger(trigger);
}
computeEyesInRootFrame(_internalPoseSet._relativePoses);
}
applyOverridePoses();
@@ -1067,14 +1051,21 @@ void Rig::updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm
glm::mat4 rigToWorld = createMatFromQuatAndPos(modelRotation, modelTranslation);
glm::mat4 worldToRig = glm::inverse(rigToWorld);
glm::vec3 zAxis = glm::normalize(_internalPoseSet._absolutePoses[index].trans - transformPoint(worldToRig, lookAtSpot));
glm::quat q = rotationBetween(IDENTITY_FRONT, zAxis);
glm::quat desiredQuat = rotationBetween(IDENTITY_FRONT, zAxis);
glm::quat headQuat;
int headIndex = indexOfJoint("Head");
if (headIndex >= 0) {
headQuat = _internalPoseSet._absolutePoses[headIndex].rot;
}
glm::quat deltaQuat = desiredQuat * glm::inverse(headQuat);
// limit rotation
const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
q = glm::angleAxis(glm::clamp(glm::angle(q), -MAX_ANGLE, MAX_ANGLE), glm::axis(q));
deltaQuat = glm::angleAxis(glm::clamp(glm::angle(deltaQuat), -MAX_ANGLE, MAX_ANGLE), glm::axis(deltaQuat));
// directly set absolutePose rotation
_internalPoseSet._absolutePoses[index].rot = q;
_internalPoseSet._absolutePoses[index].rot = deltaQuat * headQuat;
}
}
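The new formulation clamps the eye's rotation relative to the head rather than relative to the rig root, so a rotated head no longer eats into (or inflates) the eyes' 30-degree budget. The clamp in isolation (standalone sketch; note that glm::angle() on a quaternion is non-negative, so only the upper bound can bind):

#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

glm::quat clampEyeRotation(const glm::quat& desired, const glm::quat& head,
                           float maxAngleRadians) {
    glm::quat delta = desired * glm::inverse(head); // eye rotation relative to the head
    if (glm::angle(delta) > maxAngleRadians) {
        delta = glm::angleAxis(maxAngleRadians, glm::axis(delta)); // keep axis, cap angle
    }
    return delta * head; // reapply on top of the head rotation
}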
@@ -1086,7 +1077,11 @@ void Rig::updateFromHandParameters(const HandParameters& params, float dt) {
const float MIN_LENGTH = 1.0e-4f;
// project the hips onto the xz plane.
auto hipsTrans = _internalPoseSet._absolutePoses[_animSkeleton->nameToJointIndex("Hips")].trans;
int hipsIndex = indexOfJoint("Hips");
glm::vec3 hipsTrans;
if (hipsIndex >= 0) {
hipsTrans = _internalPoseSet._absolutePoses[hipsIndex].trans;
}
const glm::vec2 bodyCircleCenter(hipsTrans.x, hipsTrans.z);
if (params.isLeftEnabled) {
@@ -1271,7 +1266,11 @@ void Rig::computeAvatarBoundingCapsule(
AnimPose geometryToRig = _modelOffset * _geometryOffset;
AnimPose hips = geometryToRig * _animSkeleton->getAbsoluteBindPose(_animSkeleton->nameToJointIndex("Hips"));
AnimPose hips(glm::vec3(1), glm::quat(), glm::vec3());
int hipsIndex = indexOfJoint("Hips");
if (hipsIndex >= 0) {
hips = geometryToRig * _animSkeleton->getAbsoluteBindPose(hipsIndex);
}
AnimVariantMap animVars;
glm::quat handRotation = glm::angleAxis(PI, Vectors::UNIT_X);
animVars.set("leftHandPosition", hips.trans);
@@ -1281,8 +1280,8 @@ void Rig::computeAvatarBoundingCapsule(
animVars.set("rightHandRotation", handRotation);
animVars.set("rightHandType", (int)IKTarget::Type::RotationAndPosition);
int rightFootIndex = _animSkeleton->nameToJointIndex("RightFoot");
int leftFootIndex = _animSkeleton->nameToJointIndex("LeftFoot");
int rightFootIndex = indexOfJoint("RightFoot");
int leftFootIndex = indexOfJoint("LeftFoot");
if (rightFootIndex != -1 && leftFootIndex != -1) {
glm::vec3 foot = Vectors::ZERO;
glm::quat footRotation = glm::angleAxis(0.5f * PI, Vectors::UNIT_X);
@@ -1314,7 +1313,7 @@ void Rig::computeAvatarBoundingCapsule(
// HACK to reduce the radius of the bounding capsule to be tight with the torso, we only consider joints
// from the head to the hips when computing the rest of the bounding capsule.
int index = _animSkeleton->nameToJointIndex(QString("Head"));
int index = indexOfJoint("Head");
while (index != -1) {
const FBXJointShapeInfo& shapeInfo = geometry.joints.at(index).shapeInfo;
AnimPose pose = finalPoses[index];
@@ -1337,3 +1336,5 @@ void Rig::computeAvatarBoundingCapsule(
glm::vec3 rigCenter = (geometryToRig * (0.5f * (totalExtents.maximum + totalExtents.minimum)));
localOffsetOut = rigCenter - (geometryToRig * rootPosition);
}

libraries/animation/src/Rig.h View file

@@ -231,8 +231,6 @@ public:
void updateEyeJoint(int index, const glm::vec3& modelTranslation, const glm::quat& modelRotation, const glm::quat& worldHeadOrientation, const glm::vec3& lookAt, const glm::vec3& saccade);
void calcAnimAlpha(float speed, const std::vector<float>& referenceSpeeds, float* alphaOut) const;
void computeEyesInRootFrame(const AnimPoseVec& poses);
AnimPose _modelOffset; // model to rig space
AnimPose _geometryOffset; // geometry to model space (includes unit offset & fst offsets)
@@ -305,6 +303,8 @@ public:
bool _lastEnableInverseKinematics { true };
bool _enableInverseKinematics { true };
mutable uint32_t _jointNameWarningCount { 0 };
private:
QMap<int, StateHandler> _stateHandlers;
int _nextStateHandlerId { 0 };