Reduce lock time.

Howard Stearns 2015-10-28 10:49:19 -07:00
parent 9e223a9502
commit 367175b8a6
2 changed files with 58 additions and 59 deletions


@@ -355,9 +355,6 @@ AvatarSharedPointer AvatarManager::getAvatarBySessionID(const QUuid& sessionID)
     if (sessionID == _myAvatar->getSessionUUID()) {
         return std::static_pointer_cast<Avatar>(_myAvatar);
     }
-    AvatarSharedPointer avatar;
-    withAvatarHash([&avatar, &sessionID] (const AvatarHash& hash) {
-        avatar = hash[sessionID];
-    });
-    return avatar;
+    QReadLocker locker(&_hashLock);
+    return _avatarHash[sessionID];
 }
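
The hunk above swaps a locked callback (withAvatarHash ran the caller's code while holding the hash lock) for a QReadLocker that is held only for the lookup itself. A minimal sketch of that pattern, independent of the hifi codebase; the Registry class, its int payload, and the method names are illustrative assumptions, not the project's API:

    // Sketch: a QHash guarded by a QReadWriteLock, where a lookup takes a read lock only
    // for the duration of the lookup. Registry and its int payload are illustrative names.
    #include <QHash>
    #include <QReadWriteLock>
    #include <QReadLocker>
    #include <QWriteLocker>
    #include <QUuid>
    #include <memory>

    class Registry {
    public:
        std::shared_ptr<int> get(const QUuid& id) const {
            QReadLocker locker(&_lock);   // released as soon as the function returns
            return _hash.value(id);       // value() returns a null pointer on a miss, no insert
        }
        void put(const QUuid& id, const std::shared_ptr<int>& item) {
            QWriteLocker locker(&_lock);  // writers take the exclusive side of the same lock
            _hash.insert(id, item);
        }
    private:
        mutable QReadWriteLock _lock;     // mutable so const readers can still lock
        QHash<QUuid, std::shared_ptr<int>> _hash;
    };

One Qt detail worth noting: QHash::operator[] inserts a default-constructed value when the key is missing, so QHash::value() (used in the sketch) is the safer lookup to make while holding only a read lock.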


@@ -1018,72 +1018,74 @@ void MyAvatar::updateLookAtTargetAvatar() {
     const float KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR = 1.3f;
     const float GREATEST_LOOKING_AT_DISTANCE = 10.0f;
-    DependencyManager::get<AvatarManager>()->withAvatarHash([&] (const AvatarHash& hash) {
-        foreach (const AvatarSharedPointer& avatarPointer, hash) {
-            auto avatar = static_pointer_cast<Avatar>(avatarPointer);
-            bool isCurrentTarget = avatar->getIsLookAtTarget();
-            float distanceTo = glm::length(avatar->getHead()->getEyePosition() - cameraPosition);
-            avatar->setIsLookAtTarget(false);
-            if (!avatar->isMyAvatar() && avatar->isInitialized() && (distanceTo < GREATEST_LOOKING_AT_DISTANCE * getScale())) {
-                float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
-                if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) {
-                    _lookAtTargetAvatar = avatarPointer;
-                    _targetAvatarPosition = avatarPointer->getPosition();
-                    smallestAngleTo = angleTo;
-                }
-                if (isLookingAtMe(avatar)) {
+    AvatarHash hash;
+    DependencyManager::get<AvatarManager>()->withAvatarHash([&] (const AvatarHash& locked) {
+        hash = locked; // make a shallow copy and operate on that, to minimize lock time
+    });
+    foreach (const AvatarSharedPointer& avatarPointer, hash) {
+        auto avatar = static_pointer_cast<Avatar>(avatarPointer);
+        bool isCurrentTarget = avatar->getIsLookAtTarget();
+        float distanceTo = glm::length(avatar->getHead()->getEyePosition() - cameraPosition);
+        avatar->setIsLookAtTarget(false);
+        if (!avatar->isMyAvatar() && avatar->isInitialized() && (distanceTo < GREATEST_LOOKING_AT_DISTANCE * getScale())) {
+            float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
+            if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) {
+                _lookAtTargetAvatar = avatarPointer;
+                _targetAvatarPosition = avatarPointer->getPosition();
+                smallestAngleTo = angleTo;
+            }
+            if (isLookingAtMe(avatar)) {
-                    // Alter their gaze to look directly at my camera; this looks more natural than looking at my avatar's face.
-                    glm::vec3 lookAtPosition = avatar->getHead()->getLookAtPosition(); // A position, in world space, on my avatar.
+                // Alter their gaze to look directly at my camera; this looks more natural than looking at my avatar's face.
+                glm::vec3 lookAtPosition = avatar->getHead()->getLookAtPosition(); // A position, in world space, on my avatar.
-                    // The camera isn't at the point midway between the avatar eyes. (Even without an HMD, the head can be offset a bit.)
-                    // Let's get everything to world space:
-                    glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition();
-                    glm::vec3 avatarRightEye = getHead()->getRightEyePosition();
-                    // When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok.
-                    // By my inspection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.)
-                    // This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space.
-                    glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
-                    glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
-                    glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
-                    glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
-                    auto humanSystem = qApp->getViewFrustum();
-                    glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
-                    glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
+                // The camera isn't at the point midway between the avatar eyes. (Even without an HMD, the head can be offset a bit.)
+                // Let's get everything to world space:
+                glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition();
+                glm::vec3 avatarRightEye = getHead()->getRightEyePosition();
+                // When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok.
+                // By my inspection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.)
+                // This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space.
+                glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
+                glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
+                glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
+                glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
+                auto humanSystem = qApp->getViewFrustum();
+                glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
+                glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
-                    // First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point.
-                    // (We will be adding that offset to the camera position, after making some other adjustments.)
-                    glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition();
+                // First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point.
+                // (We will be adding that offset to the camera position, after making some other adjustments.)
+                glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition();
-                    // Scale by proportional differences between avatar and human.
-                    float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye);
-                    float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
-                    gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
+                // Scale by proportional differences between avatar and human.
+                float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye);
+                float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
+                gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
-                    // If the camera is also not oriented with the head, adjust by getting the offset in head-space...
-                    /* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday.
-                    glm::quat avatarHeadOrientation = getHead()->getOrientation();
-                    glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset;
-                    // ... and treat that as though it were in camera space, bringing it back to world space.
-                    // But camera is fudged to make the picture feel like the avatar's orientation.
-                    glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrientation() ?
-                    gazeOffset = humanOrientation * gazeOffsetLocalToHead;
-                    glm::vec3 corrected = humanSystem->getPosition() + gazeOffset;
-                    */
+                // If the camera is also not oriented with the head, adjust by getting the offset in head-space...
+                /* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday.
+                glm::quat avatarHeadOrientation = getHead()->getOrientation();
+                glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset;
+                // ... and treat that as though it were in camera space, bringing it back to world space.
+                // But camera is fudged to make the picture feel like the avatar's orientation.
+                glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrientation() ?
+                gazeOffset = humanOrientation * gazeOffsetLocalToHead;
+                glm::vec3 corrected = humanSystem->getPosition() + gazeOffset;
+                */
-                    // And now we can finally add that offset to the camera.
-                    glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset;
+                // And now we can finally add that offset to the camera.
+                glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset;
-                    avatar->getHead()->setCorrectedLookAtPosition(corrected);
+                avatar->getHead()->setCorrectedLookAtPosition(corrected);
-                } else {
-                    avatar->getHead()->clearCorrectedLookAtPosition();
-                }
+            } else {
+                avatar->getHead()->clearCorrectedLookAtPosition();
+            }
-            } else {
-                avatar->getHead()->clearCorrectedLookAtPosition();
-            }
+        } else {
+            avatar->getHead()->clearCorrectedLookAtPosition();
+        }
-        }
-    });
+    }
     auto avatarPointer = _lookAtTargetAvatar.lock();
     if (avatarPointer) {
         static_pointer_cast<Avatar>(avatarPointer)->setIsLookAtTarget(true);
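
The second hunk applies the same idea to iteration: hold the read lock just long enough to take a shallow copy of the avatar hash (Qt containers are implicitly shared, so the copy is cheap), then run the per-avatar gaze work with no lock held. A minimal sketch of that copy-then-iterate pattern, under the same assumptions as before; Registry, ItemHash, and processAll are illustrative names, and withItemHash stands in for the project's withAvatarHash:

    // Sketch: copy the implicitly shared hash under a short read lock, then iterate the
    // snapshot without holding the lock. Registry/ItemHash/processAll are illustrative names.
    #include <QHash>
    #include <QReadWriteLock>
    #include <QReadLocker>
    #include <QUuid>
    #include <functional>
    #include <memory>
    #include <utility>

    using ItemHash = QHash<QUuid, std::shared_ptr<int>>;

    class Registry {
    public:
        void withItemHash(const std::function<void(const ItemHash&)>& callback) const {
            QReadLocker locker(&_lock);   // the lock is held for the whole callback...
            callback(_hash);              // ...so the callback should do as little as possible
        }
    private:
        mutable QReadWriteLock _lock;
        ItemHash _hash;
    };

    void processAll(const Registry& registry) {
        ItemHash snapshot;
        registry.withItemHash([&](const ItemHash& locked) {
            snapshot = locked;            // shallow copy: the only work done under the lock
        });
        // std::as_const keeps the range-for from detaching (deep-copying) the shared snapshot.
        for (const auto& item : std::as_const(snapshot)) {
            if (item) {
                // expensive per-item work happens here, with no lock held
            }
        }
    }

Because the snapshot keeps its own references to the shared data and to each shared_ptr value, the loop stays valid even if another thread inserts or removes entries in the live hash while it runs; the trade-off is that the loop sees a momentarily stale view, which is fine for a per-frame gaze update.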