Merge pull request #3479 from PhilipRosedale/master
Improved eye contact, audio driven mouth and brows, linear audio scope
This commit is contained in commit 0775e9dde4.

12 changed files with 82 additions and 48 deletions
@@ -43,7 +43,7 @@ var noFly = true;
 var fixedWalkVelocity = true;
 
 //var roomLimits = { xMin: 618, xMax: 635.5, zMin: 528, zMax: 552.5 };
-var roomLimits = { xMin: 193.0, xMax: 206.5, zMin: 251.4, zMax: 269.5 };
+var roomLimits = { xMin: 100.0, xMax: 206.5, zMin: 251.4, zMax: 269.5 };
 
 function isInRoom(position) {
     var BUFFER = 2.0;

@@ -1888,13 +1888,12 @@ void Application::shrinkMirrorView() {
     }
 }
 
-const float HEAD_SPHERE_RADIUS = 0.07f;
+const float HEAD_SPHERE_RADIUS = 0.1f;
 
 bool Application::isLookingAtMyAvatar(Avatar* avatar) {
-    glm::vec3 theirLookat = avatar->getHead()->getLookAtPosition();
-    glm::vec3 myHeadPosition = _myAvatar->getHead()->getPosition();
-
-    if (pointInSphere(theirLookat, myHeadPosition, HEAD_SPHERE_RADIUS * _myAvatar->getScale())) {
+    glm::vec3 theirLookAt = avatar->getHead()->getLookAtPosition();
+    glm::vec3 myEyePosition = _myAvatar->getHead()->getEyePosition();
+    if (pointInSphere(theirLookAt, myEyePosition, HEAD_SPHERE_RADIUS * _myAvatar->getScale())) {
         return true;
     }
     return false;

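pointInSphere itself is not part of this diff; a minimal sketch of the squared-distance test the call site implies (the name and signature come from the call above, the body is an assumption):

#include <glm/glm.hpp>

// Returns true when `point` lies inside the sphere at `center` with `radius`.
// Comparing squared lengths avoids a sqrt per gaze test.
bool pointInSphere(const glm::vec3& point, const glm::vec3& center, float radius) {
    glm::vec3 offset = point - center;
    return glm::dot(offset, offset) <= radius * radius;
}

With the new constants, another avatar counts as looking at me when its look-at point falls within 0.1 m (scaled by avatar scale) of my eye position, rather than 0.07 m of my head position.
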
@@ -2000,21 +1999,23 @@ void Application::updateMyAvatarLookAtPosition() {
         lookAtSpot = _myCamera.getPosition();
 
     } else {
-        if (_myAvatar->getLookAtTargetAvatar() && _myAvatar != _myAvatar->getLookAtTargetAvatar()) {
+        AvatarSharedPointer lookingAt = _myAvatar->getLookAtTargetAvatar().toStrongRef();
+        if (lookingAt && _myAvatar != lookingAt.data()) {
+
             isLookingAtSomeone = true;
             //  If I am looking at someone else, look directly at one of their eyes
             if (tracker) {
                 //  If tracker active, look at the eye for the side my gaze is biased toward
                 if (tracker->getEstimatedEyeYaw() > _myAvatar->getHead()->getFinalYaw()) {
                     // Look at their right eye
-                    lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getRightEyePosition();
+                    lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getRightEyePosition();
                 } else {
                     // Look at their left eye
-                    lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getLeftEyePosition();
+                    lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getLeftEyePosition();
                 }
             } else {
                 //  Need to add randomly looking back and forth between left and right eye for case with no tracker
-                lookAtSpot = static_cast<Avatar*>(_myAvatar->getLookAtTargetAvatar())->getHead()->getEyePosition();
+                lookAtSpot = static_cast<Avatar*>(lookingAt.data())->getHead()->getEyePosition();
             }
         } else {
             //  I am not looking at anyone else, so just look forward

@@ -298,6 +298,8 @@ public:
     ScriptEngine* getScriptEngine(QString scriptHash) { return _scriptEnginesHash.contains(scriptHash) ? _scriptEnginesHash[scriptHash] : NULL; }
 
     void setCursorVisible(bool visible);
+
+    bool isLookingAtMyAvatar(Avatar* avatar);
 
 signals:

@@ -412,7 +414,6 @@ private:
     void updateCursor(float deltaTime);
 
     Avatar* findLookatTargetAvatar(glm::vec3& eyePosition, QUuid &nodeUUID);
-    bool isLookingAtMyAvatar(Avatar* avatar);
 
     void renderLookatIndicator(glm::vec3 pointOfInterest);
 

@@ -630,7 +630,7 @@ void Audio::handleAudioInput() {
             measuredDcOffset += networkAudioSamples[i];
             networkAudioSamples[i] -= (int16_t) _dcOffset;
             thisSample = fabsf(networkAudioSamples[i]);
-            if (thisSample >= (32767.0f * CLIPPING_THRESHOLD)) {
+            if (thisSample >= ((float)MAX_16_BIT_AUDIO_SAMPLE * CLIPPING_THRESHOLD)) {
                 _timeSinceLastClip = 0.0f;
             }
             loudness += thisSample;

@@ -1375,32 +1375,16 @@ int Audio::addBufferToScope(QByteArray* byteArray, int frameOffset, const int16_
         return 0;
     }
 
-    // Constant multiplier to map sample value to vertical size of scope
-    float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);
-
-    // Used to scale each sample. (logf(sample) + fadeOffset) is same as logf(sample * fade).
-    float fadeOffset = logf(fade);
-
     // Temporary variable receives sample value
     float sample;
 
-    // Temporary variable receives mapping of sample value
-    int16_t value;
-
     QMutexLocker lock(&_guard);
     // Short int pointer to mapped samples in byte array
     int16_t* destination = (int16_t*) byteArray->data();
 
     for (int i = 0; i < sourceSamplesPerChannel; i++) {
         sample = (float)source[i * sourceNumberOfChannels + sourceChannel];
-        if (sample > 1) {
-            value = (int16_t)(multiplier * (logf(sample) + fadeOffset));
-        } else if (sample < -1) {
-            value = (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
-        } else {
-            value = 0;
-        }
-        destination[frameOffset] = value;
+        destination[frameOffset] = sample / (float) MAX_16_BIT_AUDIO_SAMPLE * (float)SCOPE_HEIGHT / 2.0f;
         frameOffset = (frameOffset == _samplesPerScope - 1) ? 0 : frameOffset + 1;
     }
     return frameOffset;

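This hunk is the "linear audio scope" part of the commit: the log-compressed mapping is dropped for a direct linear one. A side-by-side sketch of the two mappings, assuming illustrative values for SCOPE_HEIGHT and MULTIPLIER_SCOPE_HEIGHT (only MAX_16_BIT_AUDIO_SAMPLE = 32767 appears in the diff):

#include <cmath>
#include <cstdint>

static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;
static const int SCOPE_HEIGHT = 256;           // assumed display height in pixels
static const int MULTIPLIER_SCOPE_HEIGHT = 8;  // assumed log-scale multiplier

// Old behavior: compress dynamic range with a log curve; quiet samples
// (|sample| <= 1) collapse to zero, louder ones grow logarithmically.
int16_t logScopeValue(float sample, float fade) {
    float multiplier = (float)MULTIPLIER_SCOPE_HEIGHT / logf(2.0f);
    float fadeOffset = logf(fade);
    if (sample > 1.0f) {
        return (int16_t)(multiplier * (logf(sample) + fadeOffset));
    } else if (sample < -1.0f) {
        return (int16_t)(-multiplier * (logf(-sample) + fadeOffset));
    }
    return 0;
}

// New behavior: map the sample linearly onto half the scope height, so the
// trace is proportional to the actual waveform.
int16_t linearScopeValue(float sample) {
    return (int16_t)(sample / (float)MAX_16_BIT_AUDIO_SAMPLE * (float)SCOPE_HEIGHT / 2.0f);
}
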
@@ -47,6 +47,8 @@
 
 static const int NUM_AUDIO_CHANNELS = 2;
 
+static const int MAX_16_BIT_AUDIO_SAMPLE = 32767;
+
 class QAudioInput;
 class QAudioOutput;

@@ -64,7 +64,7 @@ void FaceModel::maybeUpdateEyeRotation(Model* model, const JointState& parentSta
         glm::translate(state.getDefaultTranslationInConstrainedFrame()) *
         joint.preTransform * glm::mat4_cast(joint.preRotation * joint.rotation));
     glm::vec3 front = glm::vec3(inverse * glm::vec4(_owningHead->getFinalOrientationInWorldFrame() * IDENTITY_FRONT, 0.0f));
-    glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getLookAtPosition() +
+    glm::vec3 lookAt = glm::vec3(inverse * glm::vec4(_owningHead->getCorrectedLookAtPosition() +
         _owningHead->getSaccade() - model->getTranslation(), 1.0f));
     glm::quat between = rotationBetween(front, lookAt);
     const float MAX_ANGLE = 30.0f * RADIANS_PER_DEGREE;

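rotationBetween is pre-existing utility code, not shown in this diff; a hypothetical sketch of the standard construction such a helper usually performs (the real implementation may differ, and should also guard the antiparallel case where s approaches zero):

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Quaternion rotating unit vector v1 onto unit vector v2 via the half-angle
// identity: w = cos(theta/2) = s/2, xyz = cross(v1, v2)/s, s = sqrt(2(1+cos)).
glm::quat rotationBetween(const glm::vec3& v1, const glm::vec3& v2) {
    glm::vec3 a = glm::normalize(v1);
    glm::vec3 b = glm::normalize(v2);
    float cosTheta = glm::dot(a, b);
    glm::vec3 axis = glm::cross(a, b);
    float s = sqrtf((1.0f + cosTheta) * 2.0f);
    return glm::quat(s * 0.5f, axis.x / s, axis.y / s, axis.z / s);
}

The change itself just swaps the raw look-at position for the corrected one, so eyes converge on the viewer's camera when that avatar is looking at me.
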
@@ -34,6 +34,7 @@ Head::Head(Avatar* owningAvatar) :
     _lastLoudness(0.0f),
     _longTermAverageLoudness(-1.0f),
     _audioAttack(0.0f),
+    _audioJawOpen(0.0f),
     _angularVelocity(0,0,0),
     _renderLookatVectors(false),
     _saccade(0.0f, 0.0f, 0.0f),

@@ -47,6 +48,7 @@ Head::Head(Avatar* owningAvatar) :
     _deltaLeanSideways(0.f),
     _deltaLeanForward(0.f),
     _isCameraMoving(false),
+    _isLookingAtMe(false),
     _faceModel(this)
 {
 

@@ -156,11 +158,21 @@ void Head::simulate(float deltaTime, bool isMine, bool billboard) {
         }
 
         //  use data to update fake Faceshift blendshape coefficients
-        const float JAW_OPEN_SCALE = 10.f;
+        const float JAW_OPEN_SCALE = 0.015f;
+        const float JAW_OPEN_RATE = 0.9f;
+        const float JAW_CLOSE_RATE = 0.90f;
+        float audioDelta = sqrtf(glm::max(_averageLoudness - _longTermAverageLoudness, 0.0f)) * JAW_OPEN_SCALE;
+        if (audioDelta > _audioJawOpen) {
+            _audioJawOpen += (audioDelta - _audioJawOpen) * JAW_OPEN_RATE;
+        } else {
+            _audioJawOpen *= JAW_CLOSE_RATE;
+        }
+        _audioJawOpen = glm::clamp(_audioJawOpen, 0.0f, 1.0f);
+
         Application::getInstance()->getFaceshift()->updateFakeCoefficients(_leftEyeBlink,
                                                                            _rightEyeBlink,
                                                                            _browAudioLift,
-                                                                           glm::clamp(log(_averageLoudness) / JAW_OPEN_SCALE, 0.0f, 1.0f),
+                                                                           _audioJawOpen,
                                                                            _blendshapeCoefficients);
     }

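The jaw logic above is an asymmetric envelope follower: it attacks quickly toward a loudness-derived target and decays multiplicatively when the sound falls away, so the mouth snaps open on speech onsets and eases shut between words. Restated as a self-contained sketch, with the member variables turned into parameters:

#include <algorithm>
#include <cmath>

float updateJawOpen(float jawOpen, float averageLoudness, float longTermAverageLoudness) {
    const float JAW_OPEN_SCALE = 0.015f;
    const float JAW_OPEN_RATE = 0.9f;
    const float JAW_CLOSE_RATE = 0.90f;
    // Only loudness above the long-term average drives the jaw; sqrt softens peaks
    float audioDelta = sqrtf(std::max(averageLoudness - longTermAverageLoudness, 0.0f)) * JAW_OPEN_SCALE;
    if (audioDelta > jawOpen) {
        jawOpen += (audioDelta - jawOpen) * JAW_OPEN_RATE;   // fast attack
    } else {
        jawOpen *= JAW_CLOSE_RATE;                           // exponential decay
    }
    return std::min(std::max(jawOpen, 0.0f), 1.0f);
}
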
@@ -199,7 +211,7 @@ void Head::render(float alpha, Model::RenderMode mode) {
 }
 
 void Head::renderPostLighting() {
-    renderLookatVectors(_leftEyePosition, _rightEyePosition, _lookAtPosition);
+    renderLookatVectors(_leftEyePosition, _rightEyePosition, getCorrectedLookAtPosition());
 }
 
 void Head::setScale (float scale) {

@@ -217,6 +229,19 @@ glm::quat Head::getFinalOrientationInLocalFrame() const {
     return glm::quat(glm::radians(glm::vec3(getFinalPitch(), getFinalYaw(), getFinalRoll() )));
 }
 
+glm::vec3 Head::getCorrectedLookAtPosition() {
+    if (_isLookingAtMe) {
+        return _correctedLookAtPosition;
+    } else {
+        return getLookAtPosition();
+    }
+}
+
+void Head::setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition) {
+    _isLookingAtMe = true;
+    _correctedLookAtPosition = correctedLookAtPosition;
+}
+
 glm::quat Head::getCameraOrientation () const {
     if (OculusManager::isConnected()) {
         return getOrientation();

@@ -63,6 +63,11 @@ public:
     const glm::vec3& getAngularVelocity() const { return _angularVelocity; }
     void setAngularVelocity(glm::vec3 angularVelocity) { _angularVelocity = angularVelocity; }
 
+    void setCorrectedLookAtPosition(glm::vec3 correctedLookAtPosition);
+    glm::vec3 getCorrectedLookAtPosition();
+    void clearCorrectedLookAtPosition() { _isLookingAtMe = false; }
+    bool getIsLookingAtMe() { return _isLookingAtMe; }
+
     float getScale() const { return _scale; }
     glm::vec3 getPosition() const { return _position; }
     const glm::vec3& getEyePosition() const { return _eyePosition; }

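A hedged usage sketch of this new corrected look-at API, mirroring the calling pattern in MyAvatar::updateLookAtTargetAvatar() further down; updateGazeFor and myCameraPosition are hypothetical names used only for illustration, and the Head class is the one declared above:

#include <glm/glm.hpp>

void updateGazeFor(Head* head, bool theyAreLookingAtMe, const glm::vec3& myCameraPosition) {
    if (theyAreLookingAtMe) {
        head->setCorrectedLookAtPosition(myCameraPosition);  // also flags _isLookingAtMe
    } else {
        head->clearCorrectedLookAtPosition();                // falls back to getLookAtPosition()
    }
    // Downstream consumers (e.g. FaceModel::maybeUpdateEyeRotation above) read
    // through the corrected getter rather than the raw broadcast position:
    glm::vec3 gaze = head->getCorrectedLookAtPosition();
    (void)gaze;
}
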
@@ -125,6 +130,7 @@ private:
     float _lastLoudness;
     float _longTermAverageLoudness;
     float _audioAttack;
+    float _audioJawOpen;
     glm::vec3 _angularVelocity;
     bool _renderLookatVectors;
     glm::vec3 _saccade;

@@ -143,8 +149,11 @@ private:
     float _deltaLeanForward;
 
     bool _isCameraMoving;
+    bool _isLookingAtMe;
     FaceModel _faceModel;
 
+    glm::vec3 _correctedLookAtPosition;
+
     // private methods
     void renderLookatVectors(glm::vec3 leftEyePosition, glm::vec3 rightEyePosition, glm::vec3 lookatPosition);
 

@@ -951,20 +951,37 @@ void MyAvatar::updateLookAtTargetAvatar() {
     //
     _lookAtTargetAvatar.clear();
     _targetAvatarPosition = glm::vec3(0.0f);
-    const float MIN_LOOKAT_ANGLE = PI / 4.0f;        //  Smallest angle between face and person where we will look at someone
-    float smallestAngleTo = MIN_LOOKAT_ANGLE;
+
+    glm::quat faceRotation = Application::getInstance()->getViewFrustum()->getOrientation();
+    FaceTracker* tracker = Application::getInstance()->getActiveFaceTracker();
+    if (tracker) {
+        //  If faceshift or other face tracker in use, add on the actual angle of the head
+        faceRotation *= tracker->getHeadRotation();
+    }
+    glm::vec3 lookForward = faceRotation * IDENTITY_FRONT;
+    glm::vec3 cameraPosition = Application::getInstance()->getCamera()->getPosition();
+    float smallestAngleTo = glm::radians(Application::getInstance()->getCamera()->getFieldOfView()) / 2.f;
+
+    int howManyLookingAtMe = 0;
     foreach (const AvatarSharedPointer& avatarPointer, Application::getInstance()->getAvatarManager().getAvatarHash()) {
         Avatar* avatar = static_cast<Avatar*>(avatarPointer.data());
         avatar->setIsLookAtTarget(false);
-        if (!avatar->isMyAvatar()) {
-            glm::vec3 DEFAULT_GAZE_IN_HEAD_FRAME = glm::vec3(0.0f, 0.0f, -1.0f);
-            float angleTo = glm::angle(getHead()->getFinalOrientationInWorldFrame() * DEFAULT_GAZE_IN_HEAD_FRAME,
-                                       glm::normalize(avatar->getHead()->getEyePosition() - getHead()->getEyePosition()));
+        if (!avatar->isMyAvatar() && avatar->isInitialized()) {
+            float angleTo = glm::angle(lookForward, glm::normalize(avatar->getHead()->getEyePosition() - cameraPosition));
             if (angleTo < smallestAngleTo) {
                 _lookAtTargetAvatar = avatarPointer;
                 _targetAvatarPosition = avatarPointer->getPosition();
                 smallestAngleTo = angleTo;
             }
+            //  Check if this avatar is looking at me, and fix their gaze on my camera if so
+            if (Application::getInstance()->isLookingAtMyAvatar(avatar)) {
+                howManyLookingAtMe++;
+                //  Have that avatar look directly at my camera
+                //  Philip TODO: correct to look at left/right eye
+                avatar->getHead()->setCorrectedLookAtPosition(Application::getInstance()->getViewFrustum()->getPosition());
+            } else {
+                avatar->getHead()->clearCorrectedLookAtPosition();
+            }
         }
     }
     if (_lookAtTargetAvatar) {

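The targeting change above replaces the fixed 45-degree cone with half the camera's field of view, so only avatars roughly on screen can become the look-at target, and the tracker's head rotation biases the gaze direction. The eligibility test, isolated as a sketch (isEligibleTarget is a hypothetical helper name):

#include <glm/glm.hpp>
#include <glm/gtx/vector_angle.hpp>

// A candidate is eligible when the angle between my gaze direction and the
// direction to its eyes is within half the camera's field of view.
bool isEligibleTarget(const glm::vec3& lookForward, const glm::vec3& cameraPosition,
                      const glm::vec3& theirEyePosition, float fieldOfViewDegrees) {
    float halfFov = glm::radians(fieldOfViewDegrees) / 2.0f;
    float angleTo = glm::angle(glm::normalize(lookForward),
                               glm::normalize(theirEyePosition - cameraPosition));
    return angleTo < halfFov;
}
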
@@ -119,7 +119,7 @@ public:
     Q_INVOKABLE glm::vec3 getEyePosition() const { return getHead()->getEyePosition(); }
 
     Q_INVOKABLE glm::vec3 getTargetAvatarPosition() const { return _targetAvatarPosition; }
-    AvatarData* getLookAtTargetAvatar() const { return _lookAtTargetAvatar.data(); }
+    QWeakPointer<AvatarData> getLookAtTargetAvatar() const { return _lookAtTargetAvatar; }
     void updateLookAtTargetAvatar();
     void clearLookAtTargetAvatar();
 

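getLookAtTargetAvatar() now hands back a QWeakPointer instead of a raw pointer, which forces callers to promote it before use, exactly as Application::updateMyAvatarLookAtPosition() does above. A minimal sketch of the pattern (the AvatarData stub here exists only to keep the fragment self-contained):

#include <QSharedPointer>
#include <QWeakPointer>

class AvatarData {};  // stand-in for the real class
typedef QSharedPointer<AvatarData> AvatarSharedPointer;

void useLookAtTarget(const QWeakPointer<AvatarData>& weakTarget) {
    // toStrongRef() returns a null shared pointer if the avatar has been
    // deleted, so the use-after-free a raw pointer allowed cannot happen.
    AvatarSharedPointer lookingAt = weakTarget.toStrongRef();
    if (lookingAt) {
        // lookingAt.data() is safe to use for the rest of this scope
    }
}
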
@@ -21,11 +21,6 @@ AvatarHashMap::AvatarHashMap() :
     connect(NodeList::getInstance(), &NodeList::uuidChanged, this, &AvatarHashMap::sessionUUIDChanged);
 }
 
-void AvatarHashMap::insert(const QUuid& sessionUUID, AvatarSharedPointer avatar) {
-    _avatarHash.insert(sessionUUID, avatar);
-    avatar->setSessionUUID(sessionUUID);
-}
-
 AvatarHash::iterator AvatarHashMap::erase(const AvatarHash::iterator& iterator) {
     qDebug() << "Removing Avatar with UUID" << iterator.key() << "from AvatarHashMap.";
     return _avatarHash.erase(iterator);

@@ -95,9 +90,11 @@ AvatarSharedPointer AvatarHashMap::matchingOrNewAvatar(const QUuid& sessionUUID,
         matchingAvatar = newSharedAvatar();
 
         qDebug() << "Adding avatar with sessionUUID " << sessionUUID << "to AvatarHashMap.";
 
-        _avatarHash.insert(sessionUUID, matchingAvatar);
+        matchingAvatar->setSessionUUID(sessionUUID);
         matchingAvatar->setOwningAvatarMixer(mixerWeakPointer);
+
+        _avatarHash.insert(sessionUUID, matchingAvatar);
     }
 
     return matchingAvatar;

@@ -30,8 +30,6 @@ public:
 
     const AvatarHash& getAvatarHash() { return _avatarHash; }
     int size() const { return _avatarHash.size(); }
 
-    virtual void insert(const QUuid& sessionUUID, AvatarSharedPointer avatar);
-
 public slots:
     void processAvatarMixerDatagram(const QByteArray& datagram, const QWeakPointer<Node>& mixerWeakPointer);