Mirror of https://github.com/lubosz/overte.git (synced 2025-04-23 16:14:01 +02:00)
Avatars look at your camera, not your avatar. Improvements to lookAt code
parent 729d3cf9bc
commit 75e536235c

10 changed files with 44 additions and 50 deletions
@@ -1890,10 +1890,9 @@ void Application::shrinkMirrorView() {
 const float HEAD_SPHERE_RADIUS = 0.1f;
 
 bool Application::isLookingAtMyAvatar(Avatar* avatar) {
-    glm::vec3 theirLookat = avatar->getHead()->getLookAtPosition();
+    glm::vec3 theirLookAt = avatar->getHead()->getLookAtPosition();
     glm::vec3 myEyePosition = _myAvatar->getHead()->getEyePosition();
 
-    if (pointInSphere(theirLookat, myEyePosition, HEAD_SPHERE_RADIUS * _myAvatar->getScale())) {
+    if (pointInSphere(theirLookAt, myEyePosition, HEAD_SPHERE_RADIUS * _myAvatar->getScale())) {
        return true;
    }
    return false;
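For readers outside the codebase: the check above treats the other avatar's look-at point as a target and tests whether it falls inside a small sphere around my eyes, scaled by my avatar's scale. A minimal standalone sketch of that test, assuming a local pointInSphere helper standing in for the engine's (names here are illustrative, not the engine's API):

    #include <glm/glm.hpp>

    // Hypothetical stand-in for the engine's pointInSphere(): true if 'point'
    // lies within 'radius' of 'center'.
    static bool pointInSphere(const glm::vec3& point, const glm::vec3& center, float radius) {
        return glm::distance(point, center) <= radius;
    }

    // Sketch of the gaze test: is 'theirLookAt' close enough to 'myEyePosition'
    // that the other avatar can be considered to be looking at me?
    static bool isLookingAtMe(const glm::vec3& theirLookAt, const glm::vec3& myEyePosition, float myScale) {
        const float HEAD_SPHERE_RADIUS = 0.1f;   // same constant as in the diff
        return pointInSphere(theirLookAt, myEyePosition, HEAD_SPHERE_RADIUS * myScale);
    }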
@@ -1999,21 +1998,23 @@ void Application::updateMyAvatarLookAtPosition() {
         lookAtSpot = _myCamera.getPosition();
 
     } else {
-        if (_myAvatar->getLookAtTargetAvatar() && _myAvatar != _myAvatar->getLookAtTargetAvatar()) {
+        AvatarSharedPointer lookingAt = _myAvatar->getLookAtTargetAvatar().toStrongRef();
+        if (lookingAt && _myAvatar != lookingAt.data()) {
 
             isLookingAtSomeone = true;
             // If I am looking at someone else, look directly at one of their eyes
             if (tracker) {
                 // If tracker active, look at the eye for the side my gaze is biased toward
                 if (tracker->getEstimatedEyeYaw() > _myAvatar->getHead()->getFinalYaw()) {
                     // Look at their right eye
-                    lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getRightEyePosition();
+                    lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getRightEyePosition();
                 } else {
                     // Look at their left eye
-                    lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getLeftEyePosition();
+                    lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getLeftEyePosition();
                 }
             } else {
                 // Need to add randomly looking back and forth between left and right eye for case with no tracker
-                lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getEyePosition();
+                lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getEyePosition();
             }
         } else {
             // I am not looking at anyone else, so just look forward
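The pattern introduced here is worth noting: getLookAtTargetAvatar() now returns a QWeakPointer (see the MyAvatar.h hunk below), so the caller promotes it with toStrongRef() and null-checks before dereferencing, instead of holding a raw AvatarData* that could dangle if the avatar is removed. A minimal self-contained sketch of that promotion, assuming a toy AvatarData struct (not the engine's class):

    #include <QSharedPointer>
    #include <QWeakPointer>
    #include <QDebug>

    struct AvatarData { int id = 42; };   // stand-in type for the sketch

    int main() {
        QSharedPointer<AvatarData> owner(new AvatarData());
        QWeakPointer<AvatarData> target = owner;   // what getLookAtTargetAvatar() now hands back

        // Promote to a strong reference before dereferencing; if the avatar was
        // deleted in the meantime, the shared pointer comes back null.
        QSharedPointer<AvatarData> lookingAt = target.toStrongRef();
        if (lookingAt) {
            qDebug() << "target still alive, id =" << lookingAt.data()->id;
        } else {
            qDebug() << "target avatar has been removed";
        }
        return 0;
    }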
@@ -630,7 +630,7 @@ void Audio::handleAudioInput() {
             measuredDcOffset += networkAudioSamples[i];
             networkAudioSamples[i] -= (int16_t) _dcOffset;
             thisSample = fabsf(networkAudioSamples[i]);
-            if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
+            if (thisSample >= ((float)MAX_16_BIT_AUDIO_SAMPLE * CLIPPING_THRESHOLD)) {
                 _timeSinceLastClip = 0.0f;
             }
             loudness += thisSample;
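This is a readability change: the magic number 32767 becomes the named constant MAX_16_BIT_AUDIO_SAMPLE added in the Audio.h hunk below. A tiny standalone sketch of the same clip-detection idea over a buffer; the CLIPPING_THRESHOLD value here is an assumption for illustration, not taken from the commit:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;

    int main() {
        const float CLIPPING_THRESHOLD = 0.99f;   // illustrative value only
        int16_t samples[] = { 1200, -32760, 4000, 32767 };
        int clipped = 0;
        for (int16_t s : samples) {
            // A sample near full scale counts as a clip.
            if (std::fabs((float)s) >= (float)MAX_16_BIT_AUDIO_SAMPLE * CLIPPING_THRESHOLD) {
                ++clipped;
            }
        }
        printf("clipped samples: %d\n", clipped);
        return 0;
    }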
@@ -1375,32 +1375,16 @@ int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_
         return 0;
     }
 
-    // Constant multiplier to map sample value to vertical size of scope
-    float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);
-
-    // Used to scale each sample. (logf(sample) + fadeOffset) is same as logf(sample * fade).
-    float fadeOffset = logf(fade);
-
     // Temporary variable receives sample value
     float sample;
 
-    // Temporary variable receives mapping of sample value
-    int16_t value;
-
     QMutexLocker lock(&_guard);
     // Short int pointer to mapped samples in byte array
     int16_t* destination = (int16_t*) byteArray->data();
 
     for (int i = 0; i < sourceSamplesPerChannel; i++) {
         sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
-        if (sample > 1) {
-            value = (int16_t)(multiplier * (logf(sample) + fadeOffset));
-        } else if (sample < -1) {
-            value = (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
-        } else {
-            value = 0;
-        }
-        destination[frameOffset] = value;
+        destination[frameOffset] = sample / (float) MAX_16_BIT_AUDIO_SAMPLE * (float)SCOPE_HEIGHT / 2.0f;
         frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
     }
     return frameOffset;
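The scope mapping switches from a logarithmic curve to a simple linear scale: each 16-bit sample is divided by full scale and multiplied by half the scope height. A small standalone sketch of the new mapping; the SCOPE_HEIGHT value here is illustrative, not taken from the commit:

    #include <cstdint>
    #include <cstdio>

    static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;
    static const int SCOPE_HEIGHT = 256;   // illustrative; the real constant lives elsewhere in Audio

    // Linear mapping used after this commit: full-scale input maps to +/- half the scope height.
    static int16_t mapSampleToScope(int16_t sample) {
        return (int16_t)((float)sample / (float)MAX_16_BIT_AUDIO_SAMPLE * (float)SCOPE_HEIGHT / 2.0f);
    }

    int main() {
        int16_t probes[] = { 0, 16384, 32767, -32767 };
        for (int16_t s : probes) {
            printf("%6d -> %4d\n", s, mapSampleToScope(s));   // 0, ~64, ~128, ~-128
        }
        return 0;
    }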
@@ -47,6 +47,8 @@
 
 static const int NUM_AUDIO_CHANNELS = 2;
 
+static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;
+
 
 class QAudioInput;
 class QAudioOutput;
@@ -64,7 +64,7 @@ void FaceModel::maybeUpdateEyeRotation(Model* model, const JointState& parentSta
         glm::translate(state.getDefaultTranslationInConstrainedFrame()) *
         joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
     glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientationInWorldFrame() * IDENTITY_FRONT, 0.0f));
-    glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getLookAtPosition() +
+    glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getCorrectedLookAtPosition() +
         _owningHead->getSaccade() - model->getTranslation(), 1.0f));
     glm::quat between = rotationBetween(front, lookAt);
     const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;
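Here the eye joint is rotated from its default front vector toward the (now corrected) look-at direction, with the result limited by MAX_ANGLE. A minimal standalone sketch of computing such a rotation between two directions and clamping its angle, using plain glm; rotationBetween below is a local stand-in for the engine helper, not its actual implementation:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>
    #include <cmath>

    // Stand-in for the engine's rotationBetween(): quaternion rotating direction a onto direction b.
    static glm::quat rotationBetween(const glm::vec3& a, const glm::vec3& b) {
        glm::vec3 na = glm::normalize(a);
        glm::vec3 nb = glm::normalize(b);
        float cosAngle = glm::clamp(glm::dot(na, nb), -1.0f, 1.0f);
        glm::vec3 axis = glm::cross(na, nb);
        if (glm::length(axis) < 1.0e-6f) {
            return glm::quat(1.0f, 0.0f, 0.0f, 0.0f);   // (anti)parallel: identity for this sketch
        }
        return glm::angleAxis(std::acos(cosAngle), glm::normalize(axis));
    }

    // Clamp a rotation so the eye never turns more than maxAngle radians.
    static glm::quat clampRotation(const glm::quat& q, float maxAngle) {
        float angle = glm::angle(q);
        if (angle <= maxAngle) {
            return q;
        }
        return glm::angleAxis(maxAngle, glm::axis(q));
    }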
@@ -200,7 +200,7 @@ void Head::render(float alpha, Model::RenderMode mode) {
 }
 
 void Head::renderPostLighting() {
-    renderLookatVectors(_leftEyePosition, _rightEyePosition, _lookAtPosition);
+    renderLookatVectors(_leftEyePosition, _rightEyePosition, getCorrectedLookAtPosition());
 }
 
 void Head::setScale (float scale) {
@@ -220,12 +220,17 @@ glm::quat Head::getFinalOrientationInLocalFrame() const {
 
 glm::vec3 Head::getCorrectedLookAtPosition() {
     if (_isLookingAtMe) {
-        return getLookAtPosition();
-    } else {
         return _correctedLookAtPosition;
+    } else {
+        return getLookAtPosition();
     }
 }
 
+void Head::setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) {
+    _isLookingAtMe = true;
+    _correctedLookAtPosition = correctedLookAtPosition;
+}
+
 glm::quat Head::getCameraOrientation () const {
     if (OculusManager::isConnected()) {
         return getOrientation();
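Taken together with the header change below, the corrected look-at is a small piece of state: setCorrectedLookAtPosition() latches "someone is looking at me" and stores the override point, clearCorrectedLookAtPosition() releases it, and getCorrectedLookAtPosition() returns the override while latched and the ordinary look-at otherwise. A self-contained sketch of that state machine, as a toy class rather than the engine's Head:

    #include <glm/glm.hpp>

    class LookAtState {
    public:
        glm::vec3 getLookAtPosition() const { return _lookAtPosition; }
        void setLookAtPosition(const glm::vec3& p) { _lookAtPosition = p; }

        // While another avatar is looking at me, an override point (their camera target) is used instead.
        void setCorrectedLookAtPosition(const glm::vec3& p) { _isLookingAtMe = true; _correctedLookAtPosition = p; }
        void clearCorrectedLookAtPosition() { _isLookingAtMe = false; }

        glm::vec3 getCorrectedLookAtPosition() const {
            return _isLookingAtMe ? _correctedLookAtPosition : _lookAtPosition;
        }

    private:
        bool _isLookingAtMe = false;
        glm::vec3 _lookAtPosition = glm::vec3(0.0f);
        glm::vec3 _correctedLookAtPosition = glm::vec3(0.0f);
    };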
@@ -63,9 +63,10 @@ public:
     const glm::vec3& getAngularVelocity() const { return _angularVelocity; }
     void setAngularVelocity(glm::vec3 angularVelocity) { _angularVelocity = angularVelocity; }
 
-    void setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) { _correctedLookAtPosition = correctedLookAtPosition; }
+    void setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition);
+    glm::vec3 getCorrectedLookAtPosition();
     void clearCorrectedLookAtPosition() { _isLookingAtMe = false; }
     bool getIsLookingAtMe() { return _isLookingAtMe; }
 
     float getScale() const { return _scale; }
     glm::vec3 getPosition() const { return _position; }
@@ -951,16 +951,23 @@ void MyAvatar::updateLookAtTargetAvatar() {
     //
     _lookAtTargetAvatar.clear();
     _targetAvatarPosition = glm::vec3(0.0f);
-    const float MIN_LOOKAT_ANGLE = PI / 4.0f; // Smallest angle between face and person where we will look at someone
-    float smallestAngleTo = MIN_LOOKAT_ANGLE;
 
+    glm::quat faceRotation = Application::getInstance()->getViewFrustum()->getOrientation();
+    FaceTracker* tracker = Application::getInstance()->getActiveFaceTracker();
+    if (tracker) {
+        // If faceshift or other face tracker in use, add on the actual angle of the head
+        faceRotation *= tracker->getHeadRotation();
+    }
+    glm::vec3 lookForward = faceRotation * IDENTITY_FRONT;
+    glm::vec3 cameraPosition = Application::getInstance()->getCamera()->getPosition();
+    float smallestAngleTo = glm::radians(Application::getInstance()->getCamera()->getFieldOfView()) / 2.f;
 
     int howManyLookingAtMe = 0;
     foreach (const AvatarSharedPointer& avatarPointer, Application::getInstance()->getAvatarManager().getAvatarHash()) {
         Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
+        avatar->setIsLookAtTarget(false);
-        if (!avatar->isMyAvatar()) {
-            glm::vec3 DEFAULT_GAZE_IN_HEAD_FRAME = glm::vec3(0.0f, 0.0f, -1.0f);
-            float angleTo = glm::angle(getHead()->getFinalOrientationInWorldFrame() * DEFAULT_GAZE_IN_HEAD_FRAME,
-                glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition()));
+        if (!avatar->isMyAvatar() && avatar->isInitialized()) {
+            float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
             if (angleTo < smallestAngleTo) {
                 _lookAtTargetAvatar = avatarPointer;
                 _targetAvatarPosition = avatarPointer->getPosition();
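The selection rule after this commit: take the camera's look direction (plus the tracked head rotation when a face tracker is running) and pick the other avatar whose eyes are closest in angle to it, accepting candidates only within half the camera's field of view. A minimal standalone sketch of that angular selection, using plain glm; the Candidate struct and function names are illustrative, not engine types:

    #include <glm/glm.hpp>
    #include <cmath>
    #include <vector>
    #include <cstdio>

    // Illustrative candidate record; not an engine type.
    struct Candidate {
        int id;
        glm::vec3 eyePosition;
    };

    // Angle between two directions, computed without GLM extensions.
    static float angleBetween(const glm::vec3& a, const glm::vec3& b) {
        float c = glm::clamp(glm::dot(glm::normalize(a), glm::normalize(b)), -1.0f, 1.0f);
        return std::acos(c);
    }

    // Pick the candidate whose eyes are closest in angle to the camera's look direction,
    // but only if it falls within half the camera's field of view. Returns -1 if none qualifies.
    static int pickLookAtTarget(const glm::vec3& cameraPosition, const glm::vec3& lookForward,
                                float fieldOfViewRadians, const std::vector<Candidate>& others) {
        float smallestAngleTo = fieldOfViewRadians / 2.0f;
        int target = -1;
        for (const Candidate& c : others) {
            float angleTo = angleBetween(lookForward, c.eyePosition - cameraPosition);
            if (angleTo < smallestAngleTo) {
                smallestAngleTo = angleTo;
                target = c.id;
            }
        }
        return target;
    }

    int main() {
        std::vector<Candidate> others = {
            { 1, glm::vec3( 2.0f, 1.7f, -5.0f) },
            { 2, glm::vec3(-0.2f, 1.7f, -3.0f) },
        };
        int id = pickLookAtTarget(glm::vec3(0.0f, 1.7f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f),
                                  glm::radians(90.0f), others);
        printf("look-at target: %d\n", id);   // picks candidate 2, nearly straight ahead
        return 0;
    }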
@@ -970,17 +977,16 @@ void MyAvatar::updateLookAtTargetAvatar() {
             if (Application::getInstance()->isLookingAtMyAvatar(avatar)) {
                 howManyLookingAtMe++;
-                // Have that avatar look directly at my camera
-                // TODO: correct to look at left/right eye
-                avatar->getHead()->setLookAtPosition(Application::getInstance()->getCamera()->getPosition());
+                // Philip TODO: correct to look at left/right eye
+                avatar->getHead()->setCorrectedLookAtPosition(Application::getInstance()->getCamera()->getPosition());
             } else {
                 avatar->getHead()->clearCorrectedLookAtPosition();
             }
         }
     }
     if (_lookAtTargetAvatar) {
         static_cast<Avatar*>(_lookAtTargetAvatar.data())->setIsLookAtTarget(true);
     }
     if (howManyLookingAtMe > 0) {
         qDebug() << "look @me: " << howManyLookingAtMe;
     }
 }
 
 void MyAvatar::clearLookAtTargetAvatar() {
@@ -119,7 +119,7 @@ public:
     Q_INVOKABLE glm::vec3 getEyePosition() const { return getHead()->getEyePosition(); }
 
     Q_INVOKABLE glm::vec3 getTargetAvatarPosition() const { return _targetAvatarPosition; }
-    AvatarData* getLookAtTargetAvatar() const { return _lookAtTargetAvatar.data(); }
+    QWeakPointer<AvatarData> getLookAtTargetAvatar() const { return _lookAtTargetAvatar; }
     void updateLookAtTargetAvatar();
     void clearLookAtTargetAvatar();
@@ -21,11 +21,6 @@ AvatarHashMap::AvatarHashMap() :
     connect(NodeList::getInstance(), &NodeList::uuidChanged, this, &AvatarHashMap::sessionUUIDChanged);
 }
 
-void AvatarHashMap::insert(const QUuid& sessionUUID, AvatarSharedPointer avatar) {
-    _avatarHash.insert(sessionUUID, avatar);
-    avatar->setSessionUUID(sessionUUID);
-}
-
 AvatarHash::iterator AvatarHashMap::erase(const AvatarHash::iterator& iterator) {
     qDebug() << "Removing Avatar with UUID" << iterator.key() << "from AvatarHashMap.";
     return _avatarHash.erase(iterator);
@@ -95,9 +90,11 @@ AvatarSharedPointer AvatarHashMap::matchingOrNewAvatar(const QUuid& sessionUUID,
         matchingAvatar = newSharedAvatar();
 
         qDebug() << "Adding avatar with sessionUUID " << sessionUUID << "to AvatarHashMap.";
-        _avatarHash.insert(sessionUUID, matchingAvatar);
 
+        matchingAvatar->setSessionUUID(sessionUUID);
         matchingAvatar->setOwningAvatarMixer(mixerWeakPointer);
+
+        _avatarHash.insert(sessionUUID, matchingAvatar);
     }
 
     return matchingAvatar;
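Across the two AvatarHashMap hunks, the dedicated insert() helper goes away and matchingOrNewAvatar() now configures the avatar (session UUID, owning mixer) before putting it into the hash. A toy sketch of that ordering with stand-in types, not the engine's classes:

    #include <QHash>
    #include <QSharedPointer>
    #include <QUuid>

    // Toy stand-ins for AvatarData / AvatarSharedPointer, just for the ordering illustration.
    struct ToyAvatar {
        QUuid sessionUUID;
    };
    typedef QSharedPointer<ToyAvatar> ToyAvatarPointer;

    int main() {
        QHash<QUuid, ToyAvatarPointer> avatarHash;
        QUuid sessionUUID = QUuid::createUuid();

        ToyAvatarPointer matchingAvatar(new ToyAvatar());
        // Finish configuring the avatar first...
        matchingAvatar->sessionUUID = sessionUUID;
        // ...and only then add it to the hash, so the stored entry is fully set up.
        avatarHash.insert(sessionUUID, matchingAvatar);
        return 0;
    }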
@@ -30,8 +30,6 @@ public:
 
     const AvatarHash& getAvatarHash() { return _avatarHash; }
     int size() const { return _avatarHash.size(); }
 
-    virtual void insert(const QUuid& sessionUUID, AvatarSharedPointer avatar);
-
 public slots:
     void processAvatarMixerDatagram(const QByteArray& datagram, const QWeakPointer<Node>& mixerWeakPointer);