Merge pull request #13338 from amantley/scriptedBlendshapesFaceFlags

Scripted Blendshapes and Procedural Face Movement Flags

Commit 0ba97fa83b
12 changed files with 591 additions and 104 deletions
@@ -2117,6 +2117,31 @@ bool MyAvatar::shouldRenderHead(const RenderArgs* renderArgs) const {
    return !defaultMode || !firstPerson || !insideHead;
}

void MyAvatar::setHasScriptedBlendshapes(bool hasScriptedBlendshapes) {
    if (hasScriptedBlendshapes == _hasScriptedBlendShapes) {
        return;
    }
    if (!hasScriptedBlendshapes) {
        // Send a forced avatarData update so the script can send neutral blendshapes on unload
        // without having to wait for the update loop. Make sure _hasScriptedBlendShapes is still true
        // before sending the update, or else the neutral blendshapes won't reach the receiving clients.
        sendAvatarDataPacket(true);
    }
    _hasScriptedBlendShapes = hasScriptedBlendshapes;
}

void MyAvatar::setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement) {
    _headData->setHasProceduralBlinkFaceMovement(hasProceduralBlinkFaceMovement);
}

void MyAvatar::setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement) {
    _headData->setHasProceduralEyeFaceMovement(hasProceduralEyeFaceMovement);
}

void MyAvatar::setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement) {
    _headData->setHasAudioEnabledFaceMovement(hasAudioEnabledFaceMovement);
}

void MyAvatar::updateOrientation(float deltaTime) {

    // Smoothly rotate body with arrow keys
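A note on the ordering in setHasScriptedBlendshapes: the forced packet has to go out while _hasScriptedBlendShapes is still true, because that flag controls whether blendshapes are included in the wire format at all. A minimal sketch of this flush-before-flip pattern, with hypothetical names (Broadcaster, sendUpdate); only the ordering is taken from the diff above:

// Sketch: flush state under the old flag before flipping it.
// `Broadcaster` and its members are hypothetical stand-ins.
#include <atomic>

class Broadcaster {
public:
    void setScripted(bool scripted) {
        if (scripted == _scripted) {
            return;
        }
        if (!scripted) {
            // While _scripted is still true, one last forced send goes out
            // with the script's final (neutral) values included.
            sendUpdate(/*force=*/true);
        }
        _scripted = scripted;  // only now does the flag flip
    }

private:
    void sendUpdate(bool force) { (void)force; /* serialize and transmit */ }
    std::atomic<bool> _scripted { false };
};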
@@ -86,6 +86,10 @@ class MyAvatar : public Avatar {
 * @property {number} audioListenerModeCamera=1 - The audio listening position is at the camera. <em>Read-only.</em>
 * @property {number} audioListenerModeCustom=2 - The audio listening position is at the position specified by the
 *     <code>customListenPosition</code> and <code>customListenOrientation</code> property values. <em>Read-only.</em>
 * @property {boolean} hasScriptedBlendshapes=false - Blendshapes will be transmitted over the network if set to <code>true</code>.
 * @property {boolean} hasProceduralBlinkFaceMovement=true - Procedural blinking will be turned on if set to <code>true</code>.
 * @property {boolean} hasProceduralEyeFaceMovement=true - Procedural eye movement will be turned on if set to <code>true</code>.
 * @property {boolean} hasAudioEnabledFaceMovement=true - If set to <code>true</code>, voice audio will move the mouth
 *     blendshapes while <code>MyAvatar.hasScriptedBlendshapes</code> is enabled.
 * @property {Vec3} customListenPosition=Vec3.ZERO - The listening position used when the <code>audioListenerMode</code>
 *     property value is <code>audioListenerModeCustom</code>.
 * @property {Quat} customListenOrientation=Quat.IDENTITY - The listening orientation used when the
@@ -187,6 +191,10 @@ class MyAvatar : public Avatar {
    Q_PROPERTY(AudioListenerMode audioListenerModeHead READ getAudioListenerModeHead)
    Q_PROPERTY(AudioListenerMode audioListenerModeCamera READ getAudioListenerModeCamera)
    Q_PROPERTY(AudioListenerMode audioListenerModeCustom READ getAudioListenerModeCustom)
    Q_PROPERTY(bool hasScriptedBlendshapes READ getHasScriptedBlendshapes WRITE setHasScriptedBlendshapes)
    Q_PROPERTY(bool hasProceduralBlinkFaceMovement READ getHasProceduralBlinkFaceMovement WRITE setHasProceduralBlinkFaceMovement)
    Q_PROPERTY(bool hasProceduralEyeFaceMovement READ getHasProceduralEyeFaceMovement WRITE setHasProceduralEyeFaceMovement)
    Q_PROPERTY(bool hasAudioEnabledFaceMovement READ getHasAudioEnabledFaceMovement WRITE setHasAudioEnabledFaceMovement)
    //TODO: make gravity feature work Q_PROPERTY(glm::vec3 gravity READ getGravity WRITE setGravity)

    Q_PROPERTY(glm::vec3 leftHandPosition READ getLeftHandPosition)
@@ -1380,6 +1388,14 @@ private:
    virtual bool shouldRenderHead(const RenderArgs* renderArgs) const override;
    void setShouldRenderLocally(bool shouldRender) { _shouldRender = shouldRender; setEnableMeshVisible(shouldRender); }
    bool getShouldRenderLocally() const { return _shouldRender; }
    void setHasScriptedBlendshapes(bool hasScriptedBlendshapes);
    bool getHasScriptedBlendshapes() const override { return _hasScriptedBlendShapes; }
    void setHasProceduralBlinkFaceMovement(bool hasProceduralBlinkFaceMovement);
    bool getHasProceduralBlinkFaceMovement() const override { return _headData->getHasProceduralBlinkFaceMovement(); }
    void setHasProceduralEyeFaceMovement(bool hasProceduralEyeFaceMovement);
    bool getHasProceduralEyeFaceMovement() const override { return _headData->getHasProceduralEyeFaceMovement(); }
    void setHasAudioEnabledFaceMovement(bool hasAudioEnabledFaceMovement);
    bool getHasAudioEnabledFaceMovement() const override { return _headData->getHasAudioEnabledFaceMovement(); }
    bool isMyAvatar() const override { return true; }
    virtual int parseDataFromBuffer(const QByteArray& buffer) override;
    virtual glm::vec3 getSkeletonPosition() const override;
@@ -1488,6 +1504,7 @@ private:
    bool _hmdRollControlEnabled { true };
    float _hmdRollControlDeadZone { ROLL_CONTROL_DEAD_ZONE_DEFAULT };
    float _hmdRollControlRate { ROLL_CONTROL_RATE_DEFAULT };
    std::atomic<bool> _hasScriptedBlendShapes { false };

    // working copy -- see AvatarData for thread-safe _sensorToWorldMatrixCache, used for outward facing access
    glm::mat4 _sensorToWorldMatrix { glm::mat4() };
@@ -46,32 +46,18 @@ void MyHead::simulate(float deltaTime) {
    auto player = DependencyManager::get<recording::Deck>();
    // Only use face trackers when not playing back a recording.
    if (!player->isPlaying()) {
        FaceTracker* faceTracker = qApp->getActiveFaceTracker();
        _isFaceTrackerConnected = faceTracker != nullptr && !faceTracker->isMuted();
        auto faceTracker = qApp->getActiveFaceTracker();
        const bool hasActualFaceTrackerConnected = faceTracker && !faceTracker->isMuted();
        _isFaceTrackerConnected = hasActualFaceTrackerConnected || _owningAvatar->getHasScriptedBlendshapes();
        if (_isFaceTrackerConnected) {
            _transientBlendshapeCoefficients = faceTracker->getBlendshapeCoefficients();

            if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {

                if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                    calculateMouthShapes(deltaTime);

                    const int JAW_OPEN_BLENDSHAPE = 21;
                    const int MMMM_BLENDSHAPE = 34;
                    const int FUNNEL_BLENDSHAPE = 40;
                    const int SMILE_LEFT_BLENDSHAPE = 28;
                    const int SMILE_RIGHT_BLENDSHAPE = 29;
                    _transientBlendshapeCoefficients[JAW_OPEN_BLENDSHAPE] += _audioJawOpen;
                    _transientBlendshapeCoefficients[SMILE_LEFT_BLENDSHAPE] += _mouth4;
                    _transientBlendshapeCoefficients[SMILE_RIGHT_BLENDSHAPE] += _mouth4;
                    _transientBlendshapeCoefficients[MMMM_BLENDSHAPE] += _mouth2;
                    _transientBlendshapeCoefficients[FUNNEL_BLENDSHAPE] += _mouth3;
                }
                applyEyelidOffset(getFinalOrientationInWorldFrame());
        if (hasActualFaceTrackerConnected) {
            _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
        }
        }

        auto eyeTracker = DependencyManager::get<EyeTracker>();
        _isEyeTrackerConnected = eyeTracker->isTracking();
        // if eye tracker is connected we should get the data here.
    }
    Parent::simulate(deltaTime);
}
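The removed Dde branch above shows the additive mixing this PR relocates: audio-driven values are summed onto the tracked coefficients at fixed indices rather than overwriting them. A self-contained sketch of that step; the index constants are copied from the diff, while the function name, vector type, and driver parameters are stand-ins:

// Sketch: additive audio-driven offsets on top of tracked blendshape coefficients.
#include <vector>

void mixAudioIntoBlendshapes(std::vector<float>& coefficients,
                             float audioJawOpen, float mouth2, float mouth3, float mouth4) {
    const int JAW_OPEN_BLENDSHAPE = 21;
    const int SMILE_LEFT_BLENDSHAPE = 28;
    const int SMILE_RIGHT_BLENDSHAPE = 29;
    const int MMMM_BLENDSHAPE = 34;
    const int FUNNEL_BLENDSHAPE = 40;
    if (coefficients.size() <= static_cast<size_t>(FUNNEL_BLENDSHAPE)) {
        coefficients.resize(FUNNEL_BLENDSHAPE + 1, 0.0f);
    }
    // Offsets are summed, not assigned, so tracker input and audio both contribute.
    coefficients[JAW_OPEN_BLENDSHAPE] += audioJawOpen;
    coefficients[SMILE_LEFT_BLENDSHAPE] += mouth4;
    coefficients[SMILE_RIGHT_BLENDSHAPE] += mouth4;
    coefficients[MMMM_BLENDSHAPE] += mouth2;
    coefficients[FUNNEL_BLENDSHAPE] += mouth3;
}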
@@ -20,6 +20,7 @@
#include <trackers/FaceTracker.h>
#include <trackers/EyeTracker.h>
#include <Rig.h>
#include "Logging.h"

#include "Avatar.h"
@@ -58,25 +59,30 @@ void Head::simulate(float deltaTime) {
        _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
    }

    if (!_isFaceTrackerConnected) {
        if (!_isEyeTrackerConnected) {
            // Update eye saccades
            const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
            const float AVERAGE_SACCADE_INTERVAL = 6.0f;
            const float MICROSACCADE_MAGNITUDE = 0.002f;
            const float SACCADE_MAGNITUDE = 0.04f;
            const float NOMINAL_FRAME_RATE = 60.0f;
    if (!_isEyeTrackerConnected) {
        // Update eye saccades
        const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
        const float AVERAGE_SACCADE_INTERVAL = 6.0f;
        const float MICROSACCADE_MAGNITUDE = 0.002f;
        const float SACCADE_MAGNITUDE = 0.04f;
        const float NOMINAL_FRAME_RATE = 60.0f;

        if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
            _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
        } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
            _saccadeTarget = SACCADE_MAGNITUDE * randVector();
        }
        _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
    } else {
        _saccade = glm::vec3();
            if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
                _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
            } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
                _saccadeTarget = SACCADE_MAGNITUDE * randVector();
            }
            _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
        } else {
            _saccade = glm::vec3();
        }
    }

    const float BLINK_SPEED = 10.0f;
    const float BLINK_SPEED_VARIABILITY = 1.0f;
    const float BLINK_START_VARIABILITY = 0.25f;
    const float FULLY_OPEN = 0.0f;
    const float FULLY_CLOSED = 1.0f;
    if (getHasProceduralBlinkFaceMovement()) {
        // Detect transition from talking to not; force blink after that and a delay
        bool forceBlink = false;
        const float TALKING_LOUDNESS = 100.0f;
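For reference, the saccade update blends _saccade toward _saccadeTarget using a pow(0.5f, NOMINAL_FRAME_RATE * deltaTime) factor. The conventional frame-rate-independent form of this family of updates uses 1 - 0.5^(rate * dt) as the blend fraction, so that half the remaining distance is covered every 1/rate seconds of wall-clock time regardless of frame timing. A minimal sketch of that standard pattern (names hypothetical, not the engine's exact formula):

// Sketch: exponential approach toward a target. The remaining error after
// time T is 0.5^(rate * T), and this composes across frames because
// 0.5^(r*dt1) * 0.5^(r*dt2) == 0.5^(r*(dt1+dt2)).
#include <cmath>

float approach(float current, float target, float deltaTime) {
    const float HALVING_RATE = 60.0f;  // halvings per second (hypothetical)
    float blend = 1.0f - std::pow(0.5f, HALVING_RATE * deltaTime);
    return current + (target - current) * blend;
}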
@@ -88,29 +94,12 @@ void Head::simulate(float deltaTime) {
            forceBlink = true;
        }

        // Update audio attack data for facial animation (eyebrows and mouth)
        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
        _audioAttack = audioAttackAveragingRate * _audioAttack +
            (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
        _lastLoudness = (audioLoudness - _longTermAverageLoudness);

        const float BROW_LIFT_THRESHOLD = 100.0f;
        if (_audioAttack > BROW_LIFT_THRESHOLD) {
            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
        }
        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);

        const float BLINK_SPEED = 10.0f;
        const float BLINK_SPEED_VARIABILITY = 1.0f;
        const float BLINK_START_VARIABILITY = 0.25f;
        const float FULLY_OPEN = 0.0f;
        const float FULLY_CLOSED = 1.0f;
        if (_leftEyeBlinkVelocity == 0.0f && _rightEyeBlinkVelocity == 0.0f) {
            // no blinking when brows are raised; blink less with increasing loudness
            const float BASE_BLINK_RATE = 15.0f / 60.0f;
            const float ROOT_LOUDNESS_TO_BLINK_INTERVAL = 0.25f;
            if (forceBlink || (_browAudioLift < EPSILON && shouldDo(glm::max(1.0f, sqrt(fabs(_averageLoudness - _longTermAverageLoudness)) *
                ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
                    ROOT_LOUDNESS_TO_BLINK_INTERVAL) / BASE_BLINK_RATE, deltaTime))) {
                _leftEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
                _rightEyeBlinkVelocity = BLINK_SPEED + randFloat() * BLINK_SPEED_VARIABILITY;
                if (randFloat() < 0.5f) {
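The blink trigger above relies on a stochastic helper, shouldDo(interval, deltaTime), to fire on average once per desired interval independent of frame rate. A sketch of the property such a helper needs to guarantee; the name and the engine's exact implementation are assumptions here:

// Sketch: a Poisson-style per-frame trigger. Over many frames the event
// fires on average once per desiredIntervalSeconds, whatever the frame rate.
#include <cstdlib>

static float randFloat01() { return static_cast<float>(rand()) / RAND_MAX; }

bool shouldDoSketch(float desiredIntervalSeconds, float deltaTime) {
    // P(fire this frame) = dt / interval, so expected fires per second = 1 / interval.
    return randFloat01() < deltaTime / desiredIntervalSeconds;
}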
@@ -136,22 +125,45 @@ void Head::simulate(float deltaTime) {
                _rightEyeBlinkVelocity = 0.0f;
            }
        }
    } else {
        _rightEyeBlink = FULLY_OPEN;
        _leftEyeBlink = FULLY_OPEN;
    }

    // use data to update fake Faceshift blendshape coefficients
    if (getHasAudioEnabledFaceMovement()) {
        // Update audio attack data for facial animation (eyebrows and mouth)
        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
        _audioAttack = audioAttackAveragingRate * _audioAttack +
            (1.0f - audioAttackAveragingRate) * fabs((audioLoudness - _longTermAverageLoudness) - _lastLoudness);
        _lastLoudness = (audioLoudness - _longTermAverageLoudness);
        const float BROW_LIFT_THRESHOLD = 100.0f;
        if (_audioAttack > BROW_LIFT_THRESHOLD) {
            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
        }
        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
        calculateMouthShapes(deltaTime);
        FaceTracker::updateFakeCoefficients(_leftEyeBlink,
            _rightEyeBlink,
            _browAudioLift,
            _audioJawOpen,
            _mouth2,
            _mouth3,
            _mouth4,
            _transientBlendshapeCoefficients);

        applyEyelidOffset(getOrientation());

    } else {
        _saccade = glm::vec3();
        _audioJawOpen = 0.0f;
        _browAudioLift = 0.0f;
        _mouth2 = 0.0f;
        _mouth3 = 0.0f;
        _mouth4 = 0.0f;
        _mouthTime = 0.0f;
    }

    FaceTracker::updateFakeCoefficients(_leftEyeBlink,
        _rightEyeBlink,
        _browAudioLift,
        _audioJawOpen,
        _mouth2,
        _mouth3,
        _mouth4,
        _transientBlendshapeCoefficients);

    if (getHasProceduralEyeFaceMovement()) {
        applyEyelidOffset(getOrientation());
    }

    _leftEyePosition = _rightEyePosition = getPosition();
@@ -300,14 +300,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
        tranlationChangedSince(lastSentTime) ||
        parentInfoChangedSince(lastSentTime));

    hasFaceTrackerInfo = !dropFaceTracking && hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime));
    hasFaceTrackerInfo = !dropFaceTracking && (hasFaceTracker() || getHasScriptedBlendshapes()) &&
        (sendAll || faceTrackerInfoChangedSince(lastSentTime));
    hasJointData = sendAll || !sendMinimum;
    hasJointDefaultPoseFlags = hasJointData;
}

const size_t byteArraySize = AvatarDataPacket::MAX_CONSTANT_HEADER_SIZE +
    (hasFaceTrackerInfo ? AvatarDataPacket::maxFaceTrackerInfoSize(_headData->getNumSummedBlendshapeCoefficients()) : 0) +
    (hasFaceTrackerInfo ? AvatarDataPacket::maxFaceTrackerInfoSize(_headData->getBlendshapeCoefficients().size()) : 0) +
    (hasJointData ? AvatarDataPacket::maxJointDataSize(_jointData.size()) : 0) +
    (hasJointDefaultPoseFlags ? AvatarDataPacket::maxJointDefaultPoseFlagsSize(_jointData.size()) : 0);
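The sizing arithmetic above reserves worst-case room per optional section. Judging by the fields written later in this diff (two eye blinks, average loudness, brow lift, and a coefficient count), the face-tracker section is a small fixed header plus one float per blendshape coefficient. A sketch of that calculation; the header layout here is an assumption, not the engine's exact struct:

// Sketch: worst-case size of a length-prefixed coefficient section.
#include <cstddef>
#include <cstdint>

constexpr size_t FACE_TRACKER_INFO_HEADER = 4 * sizeof(float) + sizeof(uint8_t); // blinks, loudness, brow, count

constexpr size_t maxFaceTrackerInfoSizeSketch(size_t numBlendshapeCoefficients) {
    return FACE_TRACKER_INFO_HEADER + numBlendshapeCoefficients * sizeof(float);
}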
@@ -442,7 +443,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
    auto startSection = destinationBuffer;
    auto data = reinterpret_cast<AvatarDataPacket::AdditionalFlags*>(destinationBuffer);

    uint8_t flags { 0 };
    uint16_t flags { 0 };

    setSemiNibbleAt(flags, KEY_STATE_START_BIT, _keyState);
@@ -450,20 +451,33 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
    bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG;
    setSemiNibbleAt(flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG);
    if (isFingerPointing) {
        setAtBit(flags, HAND_STATE_FINGER_POINTING_BIT);
        setAtBit16(flags, HAND_STATE_FINGER_POINTING_BIT);
    }
    // face tracker state
    if (_headData->_isFaceTrackerConnected) {
        setAtBit(flags, IS_FACE_TRACKER_CONNECTED);
        setAtBit16(flags, IS_FACE_TRACKER_CONNECTED);
    }
    // eye tracker state
    if (_headData->_isEyeTrackerConnected) {
        setAtBit(flags, IS_EYE_TRACKER_CONNECTED);
        setAtBit16(flags, IS_EYE_TRACKER_CONNECTED);
    }
    // referential state
    if (!parentID.isNull()) {
        setAtBit(flags, HAS_REFERENTIAL);
        setAtBit16(flags, HAS_REFERENTIAL);
    }
    // audio face movement
    if (_headData->getHasAudioEnabledFaceMovement()) {
        setAtBit16(flags, AUDIO_ENABLED_FACE_MOVEMENT);
    }
    // procedural eye face movement
    if (_headData->getHasProceduralEyeFaceMovement()) {
        setAtBit16(flags, PROCEDURAL_EYE_FACE_MOVEMENT);
    }
    // procedural blink face movement
    if (_headData->getHasProceduralBlinkFaceMovement()) {
        setAtBit16(flags, PROCEDURAL_BLINK_FACE_MOVEMENT);
    }

    data->flags = flags;
    destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags);
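Note that the bit helpers index from the most significant bit: setAtBit16(word, i) sets bit (15 - i), matching the SharedUtil changes later in this diff. A quick round-trip sketch of the new flag, using helper definitions copied from the diff:

// Sketch: MSB-first bit indexing round trip for the widened flag word.
#include <cassert>

static void setAtBit16(unsigned short& word, int bitIndex) { word |= (1 << (15 - bitIndex)); }
static bool oneAtBit16(unsigned short word, int bitIndex) { return (word >> (15 - bitIndex)) & 1; }

int main() {
    unsigned short flags = 0;
    const int AUDIO_ENABLED_FACE_MOVEMENT = 8;    // 9th bit, per AvatarData.h below
    setAtBit16(flags, AUDIO_ENABLED_FACE_MOVEMENT);
    assert(flags == 0x0080);                      // shift 15 - 8 = 7, so mask 0x0080
    assert(oneAtBit16(flags, AUDIO_ENABLED_FACE_MOVEMENT));
    return 0;
}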
@@ -506,8 +520,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
    if (hasFaceTrackerInfo) {
        auto startSection = destinationBuffer;
        auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);
        const auto& blendshapeCoefficients = _headData->getSummedBlendshapeCoefficients();

        const auto& blendshapeCoefficients = _headData->getBlendshapeCoefficients();
        // note: we don't use the blink and average loudness, we just use the numBlendShapes and
        // compute the procedural info on the client side.
        faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
        faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink;
        faceTrackerInfo->averageLoudness = _headData->_averageLoudness;
@@ -972,7 +987,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

    PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags));
    auto data = reinterpret_cast<const AvatarDataPacket::AdditionalFlags*>(sourceBuffer);
    uint8_t bitItems = data->flags;
    uint16_t bitItems = data->flags;

    // key state, stored as a semi-nibble in the bitItems
    auto newKeyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT);
@@ -980,26 +995,38 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
    // hand state, stored as a semi-nibble plus a bit in the bitItems
    // we store the hand state as well as other items in a shared bitset. The hand state is an octal, but is split
    // into two sections to maintain backward compatibility. The bits are ordered as such (0-7 left to right).
    // +---+-----+-----+--+
    // |x,x|H0,H1|x,x,x|H2|
    // +---+-----+-----+--+
    // AA 6/1/18 added three more flag bits (8, 9, and 10) for audio-enabled, procedural eye, and procedural blink face movement
    // +---+-----+-----+--+--+--+--+-----+
    // |x,x|H0,H1|x,x,x|H2|Au|Ey|Bl|xxxxx|
    // +---+-----+-----+--+--+--+--+-----+
    // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits
    auto newHandState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT)
        + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0);
        + (oneAtBit16(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0);

    auto newFaceTrackerConnected = oneAtBit(bitItems, IS_FACE_TRACKER_CONNECTED);
    auto newEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED);
    auto newFaceTrackerConnected = oneAtBit16(bitItems, IS_FACE_TRACKER_CONNECTED);
    auto newEyeTrackerConnected = oneAtBit16(bitItems, IS_EYE_TRACKER_CONNECTED);

    auto newHasAudioEnabledFaceMovement = oneAtBit16(bitItems, AUDIO_ENABLED_FACE_MOVEMENT);
    auto newHasProceduralEyeFaceMovement = oneAtBit16(bitItems, PROCEDURAL_EYE_FACE_MOVEMENT);
    auto newHasProceduralBlinkFaceMovement = oneAtBit16(bitItems, PROCEDURAL_BLINK_FACE_MOVEMENT);

    bool keyStateChanged = (_keyState != newKeyState);
    bool handStateChanged = (_handState != newHandState);
    bool faceStateChanged = (_headData->_isFaceTrackerConnected != newFaceTrackerConnected);
    bool eyeStateChanged = (_headData->_isEyeTrackerConnected != newEyeTrackerConnected);
    bool somethingChanged = keyStateChanged || handStateChanged || faceStateChanged || eyeStateChanged;
    bool audioEnableFaceMovementChanged = (_headData->getHasAudioEnabledFaceMovement() != newHasAudioEnabledFaceMovement);
    bool proceduralEyeFaceMovementChanged = (_headData->getHasProceduralEyeFaceMovement() != newHasProceduralEyeFaceMovement);
    bool proceduralBlinkFaceMovementChanged = (_headData->getHasProceduralBlinkFaceMovement() != newHasProceduralBlinkFaceMovement);
    bool somethingChanged = keyStateChanged || handStateChanged || faceStateChanged || eyeStateChanged ||
        audioEnableFaceMovementChanged || proceduralEyeFaceMovementChanged || proceduralBlinkFaceMovementChanged;

    _keyState = newKeyState;
    _handState = newHandState;
    _headData->_isFaceTrackerConnected = newFaceTrackerConnected;
    _headData->_isEyeTrackerConnected = newEyeTrackerConnected;
    _headData->setHasAudioEnabledFaceMovement(newHasAudioEnabledFaceMovement);
    _headData->setHasProceduralEyeFaceMovement(newHasProceduralEyeFaceMovement);
    _headData->setHasProceduralBlinkFaceMovement(newHasProceduralBlinkFaceMovement);

    sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags);
@@ -1060,23 +1087,21 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

    PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo));
    auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer);
    sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);

    _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink;
    _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink;
    _headData->_averageLoudness = faceTrackerInfo->averageLoudness;
    _headData->_browAudioLift = faceTrackerInfo->browAudioLift;

    int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients;
    const int coefficientsSize = sizeof(float) * numCoefficients;
    sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);

    PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize);
    _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy!
    _headData->_transientBlendshapeCoefficients.resize(numCoefficients);
    // only copy the blendshapes to headData, not the procedural face info
    memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize);
    sourceBuffer += coefficientsSize;

    int numBytesRead = sourceBuffer - startSection;
    _faceTrackerRate.increment(numBytesRead);
    _faceTrackerUpdateRate.increment();
} else {
    _headData->_blendshapeCoefficients.fill(0, _headData->_blendshapeCoefficients.size());
}

if (hasJointData) {
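The parser above reads a fixed header, then a count, then that many floats, bounds-checking before each read via PACKET_READ_CHECK. A compact sketch of the same length-prefixed pattern; the buffer layout and function here are simplified stand-ins, not the engine's macros:

// Sketch: safely reading a length-prefixed float array from a packet.
#include <cstdint>
#include <cstring>
#include <vector>

bool readCoefficients(const uint8_t* cursor, const uint8_t* end, std::vector<float>& out) {
    if (cursor + 1 > end) { return false; }                 // bounds-check the count byte
    int numCoefficients = *cursor++;
    const size_t coefficientsSize = sizeof(float) * numCoefficients;
    if (static_cast<size_t>(end - cursor) < coefficientsSize) { return false; }  // bounds-check the payload
    out.resize(numCoefficients);                            // make sure there's room for the copy
    std::memcpy(out.data(), cursor, coefficientsSize);
    return true;
}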
@@ -79,20 +79,30 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS =
// Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of
// referential data in this bit set. The hand state is an octal, but is split into two sections to maintain
// backward compatibility. The bits are ordered as such (0-7 left to right).
// +-----+-----+-+-+-+--+
// |K0,K1|H0,H1|F|E|R|H2|
// +-----+-----+-+-+-+--+
// AA 6/1/18 added three more flag bits (8, 9, and 10) for audio-enabled, procedural eye, and procedural blink face movement
//
// +-----+-----+-+-+-+--+--+--+--+-----+
// |K0,K1|H0,H1|F|E|R|H2|Au|Ey|Bl|xxxxx|
// +-----+-----+-+-+-+--+--+--+--+-----+
//
// Key state - K0,K1 is found in the 1st and 2nd bits
// Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits
// Face tracker - F is found in the 5th bit
// Eye tracker - E is found in the 6th bit
// Referential Data - R is found in the 7th bit
// Audio-enabled face movement - Au is found in the 9th bit
// Procedural eye face movement - Ey is found in the 10th bit
// Procedural blink face movement - Bl is found in the 11th bit

const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits
const int IS_FACE_TRACKER_CONNECTED = 4; // 5th bit
const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING)
const int HAS_REFERENTIAL = 6; // 7th bit
const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit
const int AUDIO_ENABLED_FACE_MOVEMENT = 8; // 9th bit
const int PROCEDURAL_EYE_FACE_MOVEMENT = 9; // 10th bit
const int PROCEDURAL_BLINK_FACE_MOVEMENT = 10; // 11th bit

const char HAND_STATE_NULL = 0;
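Since bit index 0 is the most significant bit of the 16-bit word (the helpers shift by 15 - bitIndex), each constant above maps to a fixed mask. A small sanity-check sketch, printing the mask each flag occupies:

// Sketch: where each flag lands under MSB-first indexing.
#include <cstdio>

int main() {
    const int bits[] = { 4, 5, 6, 8, 9, 10 };  // F, E, R, Au, Ey, Bl
    const char* names[] = { "IS_FACE_TRACKER_CONNECTED", "IS_EYE_TRACKER_CONNECTED",
                            "HAS_REFERENTIAL", "AUDIO_ENABLED_FACE_MOVEMENT",
                            "PROCEDURAL_EYE_FACE_MOVEMENT", "PROCEDURAL_BLINK_FACE_MOVEMENT" };
    for (int i = 0; i < 6; ++i) {
        unsigned mask = 1u << (15 - bits[i]);
        printf("%-32s bit %2d -> mask 0x%04X\n", names[i], bits[i], mask);
    }
    return 0;  // e.g. IS_FACE_TRACKER_CONNECTED -> 0x0800, AUDIO_ENABLED_FACE_MOVEMENT -> 0x0080
}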
@@ -200,9 +210,9 @@ namespace AvatarDataPacket {
    static_assert(sizeof(SensorToWorldMatrix) == SENSOR_TO_WORLD_SIZE, "AvatarDataPacket::SensorToWorldMatrix size doesn't match.");

    PACKED_BEGIN struct AdditionalFlags {
        uint8_t flags; // additional flags: hand state, key state, eye tracking
        uint16_t flags; // additional flags: hand state, key state, eye tracking
    } PACKED_END;
    const size_t ADDITIONAL_FLAGS_SIZE = 1;
    const size_t ADDITIONAL_FLAGS_SIZE = 2;
    static_assert(sizeof(AdditionalFlags) == ADDITIONAL_FLAGS_SIZE, "AvatarDataPacket::AdditionalFlags size doesn't match.");

    // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags
@@ -501,6 +511,11 @@ public:

    float getDomainLimitedScale() const;

    virtual bool getHasScriptedBlendshapes() const { return false; }
    virtual bool getHasProceduralBlinkFaceMovement() const { return true; }
    virtual bool getHasProceduralEyeFaceMovement() const { return true; }
    virtual bool getHasAudioEnabledFaceMovement() const { return false; }

    /**jsdoc
     * Returns the minimum scale allowed for this avatar in the current domain.
     * This value can change as the user changes avatars or when changing domains.
@@ -69,6 +69,24 @@ public:
    }
    bool lookAtPositionChangedSince(quint64 time) { return _lookAtPositionChanged >= time; }

    bool getHasProceduralEyeFaceMovement() const { return _hasProceduralEyeFaceMovement; }

    void setHasProceduralEyeFaceMovement(const bool hasProceduralEyeFaceMovement) {
        _hasProceduralEyeFaceMovement = hasProceduralEyeFaceMovement;
    }

    bool getHasProceduralBlinkFaceMovement() const { return _hasProceduralBlinkFaceMovement; }

    void setHasProceduralBlinkFaceMovement(const bool hasProceduralBlinkFaceMovement) {
        _hasProceduralBlinkFaceMovement = hasProceduralBlinkFaceMovement;
    }

    bool getHasAudioEnabledFaceMovement() const { return _hasAudioEnabledFaceMovement; }

    void setHasAudioEnabledFaceMovement(const bool hasAudioEnabledFaceMovement) {
        _hasAudioEnabledFaceMovement = hasAudioEnabledFaceMovement;
    }

    friend class AvatarData;

    QJsonObject toJson() const;
@@ -83,6 +101,9 @@ protected:
    glm::vec3 _lookAtPosition;
    quint64 _lookAtPositionChanged { 0 };

    bool _hasAudioEnabledFaceMovement { true };
    bool _hasProceduralBlinkFaceMovement { true };
    bool _hasProceduralEyeFaceMovement { true };
    bool _isFaceTrackerConnected { false };
    bool _isEyeTrackerConnected { false };
    float _leftEyeBlink { 0.0f };
@@ -40,7 +40,7 @@ PacketVersion versionForPacketType(PacketType packetType) {
    case PacketType::AvatarData:
    case PacketType::BulkAvatarData:
    case PacketType::KillAvatar:
        return static_cast<PacketVersion>(AvatarMixerPacketVersion::FixMannequinDefaultAvatarFeet);
        return static_cast<PacketVersion>(AvatarMixerPacketVersion::ProceduralFaceMovementFlagsAndBlendshapes);
    case PacketType::MessagesData:
        return static_cast<PacketVersion>(MessageDataVersion::TextOrBinaryData);
    // ICE packets
@@ -283,7 +283,8 @@ enum class AvatarMixerPacketVersion : PacketVersion {
    UpdatedMannequinDefaultAvatar,
    AvatarJointDefaultPoseFlags,
    FBXReaderNodeReparenting,
    FixMannequinDefaultAvatarFeet
    FixMannequinDefaultAvatarFeet,
    ProceduralFaceMovementFlagsAndBlendshapes
};

enum class DomainConnectRequestVersion : PacketVersion {
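The new enumerator is appended rather than inserted so the numeric values of the existing versions stay stable on the wire. A self-contained sketch of that invariant with a hypothetical mini-enum:

// Sketch: protocol versions must be appended, never inserted.
#include <cstdint>

enum class Version : uint8_t {
    Baseline,                                    // 0
    FixMannequinDefaultAvatarFeet,               // 1
    ProceduralFaceMovementFlagsAndBlendshapes    // 2: appended by this PR
};

static_assert(static_cast<uint8_t>(Version::ProceduralFaceMovementFlagsAndBlendshapes) ==
              static_cast<uint8_t>(Version::FixMannequinDefaultAvatarFeet) + 1,
              "new versions are appended so earlier wire values never change");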
@@ -297,14 +297,23 @@ void setAtBit(unsigned char& byte, int bitIndex) {
    byte |= (1 << (7 - bitIndex));
}

bool oneAtBit16(unsigned short word, int bitIndex) {
    return (word >> (15 - bitIndex) & 1);
}

void setAtBit16(unsigned short& word, int bitIndex) {
    word |= (1 << (15 - bitIndex));
}

void clearAtBit(unsigned char& byte, int bitIndex) {
    if (oneAtBit(byte, bitIndex)) {
        byte -= (1 << (7 - bitIndex));
    }
}

int getSemiNibbleAt(unsigned char byte, int bitIndex) {
    return (byte >> (6 - bitIndex) & 3); // semi-nibbles store 00, 01, 10, or 11
int getSemiNibbleAt(unsigned short word, int bitIndex) {
    return (word >> (14 - bitIndex) & 3); // semi-nibbles store 00, 01, 10, or 11
}

int getNthBit(unsigned char byte, int ordinal) {
@@ -326,9 +335,9 @@ int getNthBit(unsigned char byte, int ordinal) {
    return ERROR_RESULT;
}

void setSemiNibbleAt(unsigned char& byte, int bitIndex, int value) {
void setSemiNibbleAt(unsigned short& word, int bitIndex, int value) {
    //assert(value <= 3 && value >= 0);
    byte |= ((value & 3) << (6 - bitIndex)); // semi-nibbles store 00, 01, 10, or 11
    word |= ((value & 3) << (14 - bitIndex)); // semi-nibbles store 00, 01, 10, or 11
}

bool isInEnvironment(const char* environment) {
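A round-trip sketch of the widened two-bit "semi-nibble" helpers above; the bit positions match KEY_STATE_START_BIT = 0 and HAND_STATE_START_BIT = 2 from AvatarData.h:

// Sketch: semi-nibble round trip on the 16-bit flag word.
#include <cassert>

static void setSemiNibbleAtSketch(unsigned short& word, int bitIndex, int value) {
    word |= ((value & 3) << (14 - bitIndex));
}
static int getSemiNibbleAtSketch(unsigned short word, int bitIndex) {
    return (word >> (14 - bitIndex)) & 3;
}

int main() {
    unsigned short flags = 0;
    setSemiNibbleAtSketch(flags, 0, 2);  // key state in bits 0-1 (MSB side)
    setSemiNibbleAtSketch(flags, 2, 3);  // hand state low bits in bits 2-3
    assert(getSemiNibbleAtSketch(flags, 0) == 2);
    assert(getSemiNibbleAtSketch(flags, 2) == 3);
    assert(flags == 0xB000);             // binary 10 11 0000 0000 0000
    return 0;
}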
@@ -163,9 +163,11 @@ void printVoxelCode(unsigned char* voxelCode);
int numberOfOnes(unsigned char byte);
bool oneAtBit(unsigned char byte, int bitIndex);
void setAtBit(unsigned char& byte, int bitIndex);
bool oneAtBit16(unsigned short word, int bitIndex);
void setAtBit16(unsigned short& word, int bitIndex);
void clearAtBit(unsigned char& byte, int bitIndex);
int getSemiNibbleAt(unsigned char byte, int bitIndex);
void setSemiNibbleAt(unsigned char& byte, int bitIndex, int value);
int getSemiNibbleAt(unsigned short word, int bitIndex);
void setSemiNibbleAt(unsigned short& word, int bitIndex, int value);

int getNthBit(unsigned char byte, int ordinal); /// determines the bit placement 0-7 of the ordinal set bit
scripts/developer/facialExpressions.js (new file, 374 lines)
@@ -0,0 +1,374 @@
//
//  facialExpressions.js
//  A script to set different emotions using blend shapes
//
//  Author: Elisa Lupin-Jimenez
//  Copyright High Fidelity 2018
//
//  Licensed under the Apache 2.0 License
//  See accompanying license file or http://apache.org/
//
//  All assets are under CC Attribution Non-Commercial
//  http://creativecommons.org/licenses/
//

(function() {

    var TABLET_BUTTON_NAME = "EMOTIONS";
    // TODO: ADD HTML LANDING PAGE

    var TRANSITION_TIME_SECONDS = 0.25;

    var tablet = Tablet.getTablet("com.highfidelity.interface.tablet.system");
    var icon = "https://hifi-content.s3.amazonaws.com/elisalj/emoji_scripts/icons/emoji-i.svg";
    var activeIcon = "https://hifi-content.s3.amazonaws.com/elisalj/emoji_scripts/icons/emoji-a.svg";
    var isActive = true;

    var controllerMappingName;
    var controllerMapping;

    var tabletButton = tablet.addButton({
        icon: icon,
        activeIcon: activeIcon,
        text: TABLET_BUTTON_NAME,
        isActive: true
    });

    var toggle = function() {
        isActive = !isActive;
        tabletButton.editProperties({isActive: isActive});
        if (isActive) {
            Controller.enableMapping(controllerMappingName);
        } else {
            setEmotion(DEFAULT);
            Controller.disableMapping(controllerMappingName);
        }
    };

    tabletButton.clicked.connect(toggle);

    var DEFAULT = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.00,
        "EyeBlink_R": 0.00,
        "EyeSquint_L": 0.00,
        "EyeSquint_R": 0.00,
        "BrowsD_L": 0.00,
        "BrowsD_R": 0.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.00,
        "JawOpen": 0.00,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.00,
        "MouthFrown_R": 0.00,
        "MouthSmile_L": 0.00,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 0.00,
        "MouthDimple_R": 0.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.00,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.00,
        "Sneer": 0.00,
        "Puff": 0.00
    };

    var SMILE = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.30,
        "EyeBlink_R": 0.30,
        "EyeSquint_L": 0.90,
        "EyeSquint_R": 0.90,
        "BrowsD_L": 1.00,
        "BrowsD_R": 1.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.00,
        "JawOpen": 0.00,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.00,
        "MouthFrown_R": 0.00,
        "MouthSmile_L": 1.00,
        "MouthSmile_R": 1.00,
        "MouthDimple_L": 1.00,
        "MouthDimple_R": 1.00,
        "LipsUpperClose": 0.40,
        "LipsLowerClose": 0.30,
        "LipsLowerOpen": 0.25,
        "ChinUpperRaise": 0.35,
        "Sneer": 0.00,
        "Puff": 0.00
    };

    var LAUGH = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.45,
        "EyeBlink_R": 0.45,
        "EyeSquint_L": 0.75,
        "EyeSquint_R": 0.75,
        "BrowsD_L": 0.00,
        "BrowsD_R": 0.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.50,
        "JawOpen": 0.50,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.00,
        "MouthFrown_R": 0.00,
        "MouthSmile_L": 1.00,
        "MouthSmile_R": 1.00,
        "MouthDimple_L": 1.00,
        "MouthDimple_R": 1.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.00,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.30,
        "Sneer": 1.00,
        "Puff": 0.30
    };

    var FLIRT = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.50,
        "EyeBlink_R": 0.50,
        "EyeSquint_L": 0.25,
        "EyeSquint_R": 0.25,
        "BrowsD_L": 0.00,
        "BrowsD_R": 1.00,
        "BrowsU_L": 0.55,
        "BrowsU_C": 0.00,
        "JawOpen": 0.00,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.00,
        "MouthFrown_R": 0.00,
        "MouthSmile_L": 0.50,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 1.00,
        "MouthDimple_R": 1.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.00,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.00,
        "Sneer": 0.00,
        "Puff": 0.00
    };

    var SAD = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.30,
        "EyeBlink_R": 0.30,
        "EyeSquint_L": 0.30,
        "EyeSquint_R": 0.30,
        "BrowsD_L": 0.00,
        "BrowsD_R": 0.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.50,
        "JawOpen": 0.00,
        "JawFwd": 0.80,
        "MouthFrown_L": 0.80,
        "MouthFrown_R": 0.80,
        "MouthSmile_L": 0.00,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 0.00,
        "MouthDimple_R": 0.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.50,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.00,
        "Sneer": 0.00,
        "Puff": 0.00
    };

    var ANGRY = {
        "EyeOpen_L": 1.00,
        "EyeOpen_R": 1.00,
        "EyeBlink_L": 0.00,
        "EyeBlink_R": 0.00,
        "EyeSquint_L": 1.00,
        "EyeSquint_R": 1.00,
        "BrowsD_L": 1.00,
        "BrowsD_R": 1.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.00,
        "JawOpen": 0.00,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.50,
        "MouthFrown_R": 0.50,
        "MouthSmile_L": 0.00,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 0.00,
        "MouthDimple_R": 0.00,
        "LipsUpperClose": 0.50,
        "LipsLowerClose": 0.50,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.00,
        "Sneer": 0.50,
        "Puff": 0.00
    };

    var FEAR = {
        "EyeOpen_L": 1.00,
        "EyeOpen_R": 1.00,
        "EyeBlink_L": 0.00,
        "EyeBlink_R": 0.00,
        "EyeSquint_L": 0.00,
        "EyeSquint_R": 0.00,
        "BrowsD_L": 0.00,
        "BrowsD_R": 0.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 1.00,
        "JawOpen": 0.15,
        "JawFwd": 0.00,
        "MouthFrown_L": 0.30,
        "MouthFrown_R": 0.30,
        "MouthSmile_L": 0.00,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 0.00,
        "MouthDimple_R": 0.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.00,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.00,
        "Sneer": 0.00,
        "Puff": 0.00
    };

    var DISGUST = {
        "EyeOpen_L": 0.00,
        "EyeOpen_R": 0.00,
        "EyeBlink_L": 0.25,
        "EyeBlink_R": 0.25,
        "EyeSquint_L": 1.00,
        "EyeSquint_R": 1.00,
        "BrowsD_L": 1.00,
        "BrowsD_R": 1.00,
        "BrowsU_L": 0.00,
        "BrowsU_C": 0.00,
        "JawOpen": 0.00,
        "JawFwd": 0.00,
        "MouthFrown_L": 1.00,
        "MouthFrown_R": 1.00,
        "MouthSmile_L": 0.00,
        "MouthSmile_R": 0.00,
        "MouthDimple_L": 0.00,
        "MouthDimple_R": 0.00,
        "LipsUpperClose": 0.00,
        "LipsLowerClose": 0.75,
        "LipsLowerOpen": 0.00,
        "ChinUpperRaise": 0.75,
        "Sneer": 1.00,
        "Puff": 0.00
    };

    function mixValue(valueA, valueB, percentage) {
        return valueA + ((valueB - valueA) * percentage);
    }

    var lastEmotionUsed = DEFAULT;
    var emotion = DEFAULT;
    var isChangingEmotion = false;
    var changingEmotionPercentage = 0.0;

    Script.update.connect(function(deltaTime) {
        if (!isChangingEmotion) {
            return;
        }
        changingEmotionPercentage += deltaTime / TRANSITION_TIME_SECONDS;
        if (changingEmotionPercentage >= 1.0) {
            changingEmotionPercentage = 1.0;
            isChangingEmotion = false;
            if (emotion === DEFAULT) {
                MyAvatar.hasScriptedBlendshapes = false;
            }
        }
        for (var blendshape in emotion) {
            MyAvatar.setBlendshape(blendshape,
                mixValue(lastEmotionUsed[blendshape], emotion[blendshape], changingEmotionPercentage));
        }
    });

    function setEmotion(currentEmotion) {
        if (emotion !== lastEmotionUsed) {
            lastEmotionUsed = emotion;
        }
        if (currentEmotion !== lastEmotionUsed) {
            changingEmotionPercentage = 0.0;
            emotion = currentEmotion;
            isChangingEmotion = true;
            MyAvatar.hasScriptedBlendshapes = true;
        }
    }

    controllerMappingName = 'Hifi-FacialExpressions-Mapping';
    controllerMapping = Controller.newMapping(controllerMappingName);

    controllerMapping.from(Controller.Hardware.Keyboard.H).to(function(value) {
        if (value !== 0) {
            setEmotion(SMILE);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.J).to(function(value) {
        if (value !== 0) {
            setEmotion(LAUGH);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.K).to(function(value) {
        if (value !== 0) {
            setEmotion(FLIRT);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.L).to(function(value) {
        if (value !== 0) {
            setEmotion(SAD);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.V).to(function(value) {
        if (value !== 0) {
            setEmotion(ANGRY);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.B).to(function(value) {
        if (value !== 0) {
            setEmotion(FEAR);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.M).to(function(value) {
        if (value !== 0) {
            setEmotion(DISGUST);
        }
    });

    controllerMapping.from(Controller.Hardware.Keyboard.N).to(function(value) {
        if (value !== 0) {
            setEmotion(DEFAULT);
        }
    });

    Controller.enableMapping(controllerMappingName);

    Script.scriptEnding.connect(function() {
        tabletButton.clicked.disconnect(toggle);
        tablet.removeButton(tabletButton);
        Controller.disableMapping(controllerMappingName);

        if (emotion !== DEFAULT || isChangingEmotion) {
            isChangingEmotion = false;
            for (var blendshape in DEFAULT) {
                MyAvatar.setBlendshape(blendshape, DEFAULT[blendshape]);
            }
            MyAvatar.hasScriptedBlendshapes = false;
        }
    });

}());