Mirror of https://github.com/overte-org/overte.git, synced 2025-08-09 02:16:51 +02:00

getAvatarHash => withAvatarHash

commit 7b0b77f4d1
parent c2a4aab7c3

4 changed files with 65 additions and 55 deletions
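
The change swaps the unlocked accessor const AvatarHash& getAvatarHash() for void withAvatarHash(std::function<void(const AvatarHash&)>), which runs the caller's code while a read lock on the hash is held, and updates the two call sites in AvatarManager and MyAvatar to match. As a minimal, self-contained sketch of the pattern (the class name AvatarHashMapSketch and the type aliases are simplified stand-ins, not the project's real declarations):

#include <QtCore/QHash>
#include <QtCore/QReadWriteLock>
#include <QtCore/QUuid>
#include <functional>
#include <memory>

class Avatar;                                         // stand-in for the real avatar class
using AvatarSharedPointer = std::shared_ptr<Avatar>;  // simplified alias for this sketch
using AvatarHash = QHash<QUuid, AvatarSharedPointer>;

class AvatarHashMapSketch {
public:
    // Old accessor: handed out a reference to the hash with no locking.
    // const AvatarHash& getAvatarHash() { return _avatarHash; }

    // New accessor: the read lock is held for exactly as long as the callback runs.
    void withAvatarHash(std::function<void(const AvatarHash& hash)> callback) {
        QReadLocker locker(&_hashLock);
        callback(_avatarHash);
    }

private:
    AvatarHash _avatarHash;
    QReadWriteLock _hashLock;
};

Returning a reference could not guarantee that _hashLock was held while the caller iterated; scoping access to a callback ties the lifetime of the read lock to the caller's use of the hash.
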
@@ -355,5 +355,9 @@ AvatarSharedPointer AvatarManager::getAvatarBySessionID(const QUuid& sessionID)
     if (sessionID == _myAvatar->getSessionUUID()) {
         return std::static_pointer_cast<Avatar>(_myAvatar);
     }
-    return getAvatarHash()[sessionID];
+    AvatarSharedPointer avatar;
+    withAvatarHash([&avatar, &sessionID] (const AvatarHash& hash) {
+        avatar = hash[sessionID];
+    });
+    return avatar;
 }

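One behavioral detail carries over unchanged: operator[] on a const QHash does not insert a missing key, it returns a default-constructed value, so an unknown sessionID still yields a null AvatarSharedPointer exactly as getAvatarHash()[sessionID] did. A tiny standalone illustration (the int value type is an arbitrary stand-in):

#include <QtCore/QHash>
#include <QtCore/QUuid>
#include <cassert>
#include <memory>

int main() {
    const QHash<QUuid, std::shared_ptr<int>> hash;          // plays the role of the const AvatarHash& in the callback
    std::shared_ptr<int> value = hash[QUuid::createUuid()]; // lookup miss on a const hash
    assert(!value);                                         // returns a null pointer, inserts nothing
    return 0;
}
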
@@ -1018,71 +1018,72 @@ void MyAvatar::updateLookAtTargetAvatar() {
     const float KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR = 1.3f;
     const float GREATEST_LOOKING_AT_DISTANCE = 10.0f;
 
-    foreach (const AvatarSharedPointer& avatarPointer, DependencyManager::get<AvatarManager>()->getAvatarHash()) {
-        auto avatar = static_pointer_cast<Avatar>(avatarPointer);
-        bool isCurrentTarget = avatar->getIsLookAtTarget();
-        float distanceTo = glm::length(avatar->getHead()->getEyePosition() - cameraPosition);
-        avatar->setIsLookAtTarget(false);
-        if (!avatar->isMyAvatar() && avatar->isInitialized() && (distanceTo < GREATEST_LOOKING_AT_DISTANCE * getScale())) {
-            float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
-            if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) {
-                _lookAtTargetAvatar = avatarPointer;
-                _targetAvatarPosition = avatarPointer->getPosition();
-                smallestAngleTo = angleTo;
-            }
-            if (isLookingAtMe(avatar)) {
-
-                // Alter their gaze to look directly at my camera; this looks more natural than looking at my avatar's face.
-                glm::vec3 lookAtPosition = avatar->getHead()->getLookAtPosition(); // A position, in world space, on my avatar.
-
-                // The camera isn't at the point midway between the avatar eyes. (Even without an HMD, the head can be offset a bit.)
-                // Let's get everything to world space:
-                glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition();
-                glm::vec3 avatarRightEye = getHead()->getRightEyePosition();
-                // When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok.
-                // By my inpsection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.)
-                // This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space.
-                glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
-                glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
-                glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
-                glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
-                auto humanSystem = qApp->getViewFrustum();
-                glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
-                glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
-
-                // First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point.
-                // (We will be adding that offset to the camera position, after making some other adjustments.)
-                glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition();
-
-                // Scale by proportional differences between avatar and human.
-                float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye);
-                float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
-                gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
-
-                // If the camera is also not oriented with the head, adjust by getting the offset in head-space...
-                /* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday.
-                glm::quat avatarHeadOrientation = getHead()->getOrientation();
-                glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset;
-                // ... and treat that as though it were in camera space, bringing it back to world space.
-                // But camera is fudged to make the picture feel like the avatar's orientation.
-                glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrienation() ?
-                gazeOffset = humanOrientation * gazeOffsetLocalToHead;
-                glm::vec3 corrected = humanSystem->getPosition() + gazeOffset;
-                */
-
-                // And now we can finally add that offset to the camera.
-                glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset;
-
-                avatar->getHead()->setCorrectedLookAtPosition(corrected);
-
-
-            } else {
-                avatar->getHead()->clearCorrectedLookAtPosition();
-            }
-        } else {
-            avatar->getHead()->clearCorrectedLookAtPosition();
-        }
-    }
+    DependencyManager::get<AvatarManager>()->withAvatarHash([&] (const AvatarHash& hash) {
+        foreach (const AvatarSharedPointer& avatarPointer, hash) {
+            auto avatar = static_pointer_cast<Avatar>(avatarPointer);
+            bool isCurrentTarget = avatar->getIsLookAtTarget();
+            float distanceTo = glm::length(avatar->getHead()->getEyePosition() - cameraPosition);
+            avatar->setIsLookAtTarget(false);
+            if (!avatar->isMyAvatar() && avatar->isInitialized() && (distanceTo < GREATEST_LOOKING_AT_DISTANCE * getScale())) {
+                float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
+                if (angleTo < (smallestAngleTo * (isCurrentTarget ? KEEP_LOOKING_AT_CURRENT_ANGLE_FACTOR : 1.0f))) {
+                    _lookAtTargetAvatar = avatarPointer;
+                    _targetAvatarPosition = avatarPointer->getPosition();
+                    smallestAngleTo = angleTo;
+                }
+                if (isLookingAtMe(avatar)) {
+
+                    // Alter their gaze to look directly at my camera; this looks more natural than looking at my avatar's face.
+                    glm::vec3 lookAtPosition = avatar->getHead()->getLookAtPosition(); // A position, in world space, on my avatar.
+
+                    // The camera isn't at the point midway between the avatar eyes. (Even without an HMD, the head can be offset a bit.)
+                    // Let's get everything to world space:
+                    glm::vec3 avatarLeftEye = getHead()->getLeftEyePosition();
+                    glm::vec3 avatarRightEye = getHead()->getRightEyePosition();
+                    // When not in HMD, these might both answer identity (i.e., the bridge of the nose). That's ok.
+                    // By my inpsection of the code and live testing, getEyeOffset and getEyePose are the same. (Application hands identity as offset matrix.)
+                    // This might be more work than needed for any given use, but as we explore different formulations, we go mad if we don't work in world space.
+                    glm::mat4 leftEye = qApp->getEyeOffset(Eye::Left);
+                    glm::mat4 rightEye = qApp->getEyeOffset(Eye::Right);
+                    glm::vec3 leftEyeHeadLocal = glm::vec3(leftEye[3]);
+                    glm::vec3 rightEyeHeadLocal = glm::vec3(rightEye[3]);
+                    auto humanSystem = qApp->getViewFrustum();
+                    glm::vec3 humanLeftEye = humanSystem->getPosition() + (humanSystem->getOrientation() * leftEyeHeadLocal);
+                    glm::vec3 humanRightEye = humanSystem->getPosition() + (humanSystem->getOrientation() * rightEyeHeadLocal);
+
+                    // First find out where (in world space) the person is looking relative to that bridge-of-the-avatar point.
+                    // (We will be adding that offset to the camera position, after making some other adjustments.)
+                    glm::vec3 gazeOffset = lookAtPosition - getHead()->getEyePosition();
+
+                    // Scale by proportional differences between avatar and human.
+                    float humanEyeSeparationInModelSpace = glm::length(humanLeftEye - humanRightEye);
+                    float avatarEyeSeparation = glm::length(avatarLeftEye - avatarRightEye);
+                    gazeOffset = gazeOffset * humanEyeSeparationInModelSpace / avatarEyeSeparation;
+
+                    // If the camera is also not oriented with the head, adjust by getting the offset in head-space...
+                    /* Not needed (i.e., code is a no-op), but I'm leaving the example code here in case something like this is needed someday.
+                    glm::quat avatarHeadOrientation = getHead()->getOrientation();
+                    glm::vec3 gazeOffsetLocalToHead = glm::inverse(avatarHeadOrientation) * gazeOffset;
+                    // ... and treat that as though it were in camera space, bringing it back to world space.
+                    // But camera is fudged to make the picture feel like the avatar's orientation.
+                    glm::quat humanOrientation = humanSystem->getOrientation(); // or just avatar getOrienation() ?
+                    gazeOffset = humanOrientation * gazeOffsetLocalToHead;
+                    glm::vec3 corrected = humanSystem->getPosition() + gazeOffset;
+                    */
+
+                    // And now we can finally add that offset to the camera.
+                    glm::vec3 corrected = qApp->getViewFrustum()->getPosition() + gazeOffset;
+
+                    avatar->getHead()->setCorrectedLookAtPosition(corrected);
+
+                } else {
+                    avatar->getHead()->clearCorrectedLookAtPosition();
+                }
+            } else {
+                avatar->getHead()->clearCorrectedLookAtPosition();
+            }
+        }
+    });
     auto avatarPointer = _lookAtTargetAvatar.lock();
     if (avatarPointer) {
         static_pointer_cast<Avatar>(avatarPointer)->setIsLookAtTarget(true);

@@ -22,6 +22,10 @@ AvatarHashMap::AvatarHashMap() {
     connect(DependencyManager::get<NodeList>().data(), &NodeList::uuidChanged, this, &AvatarHashMap::sessionUUIDChanged);
 }
 
+void AvatarHashMap::withAvatarHash(std::function<void(const AvatarHash& hash)> callback) {
+    QReadLocker locker(&_hashLock);
+    callback(_avatarHash);
+}
 bool AvatarHashMap::isAvatarInRange(const glm::vec3& position, const float range) {
     QReadLocker locker(&_hashLock);
     foreach(const AvatarSharedPointer& sharedAvatar, _avatarHash) {

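The callback runs synchronously on the calling thread while _hashLock is held for reading, which is why the [&] capture in MyAvatar::updateLookAtTargetAvatar above is safe; it also means the hash reference must not be kept past the callback, and the callback should not try to modify the avatar hash while the read lock is still held. A hypothetical caller, not part of this commit, showing the intended convention of copying results out before the lock is released (roughly what the existing isAvatarInRange does internally with its own QReadLocker):

// Hypothetical helper for illustration only; assumes AvatarHashMap.h and glm are available.
int countAvatarsInRange(AvatarHashMap& avatars, const glm::vec3& position, float range) {
    int count = 0;
    avatars.withAvatarHash([&](const AvatarHash& hash) {
        foreach (const AvatarSharedPointer& sharedAvatar, hash) {
            // Copy what we need (a running count) while the read lock is held.
            if (glm::distance(sharedAvatar->getPosition(), position) < range) {
                ++count;
            }
        }
    });
    return count;
}
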
@@ -16,6 +16,7 @@
 #include <QtCore/QSharedPointer>
 #include <QtCore/QUuid>
 
+#include <functional>
 #include <memory>
 
 #include <DependencyManager.h>

@@ -30,7 +31,7 @@ class AvatarHashMap : public QObject, public Dependency {
     SINGLETON_DEPENDENCY
 
 public:
-    const AvatarHash& getAvatarHash() { return _avatarHash; }
+    void withAvatarHash(std::function<void(const AvatarHash& hash)>);
     int size() { return _avatarHash.size(); }
 
 public slots: