cleaned up whitespace and removed extra declaration of hasAudioEnabledFaceMovement

This commit is contained in:
amantley 2018-06-07 15:11:57 -07:00
parent 97831e61f0
commit 836c3da858
7 changed files with 10 additions and 166 deletions

View file

@ -1466,7 +1466,6 @@ private:
float _hmdRollControlDeadZone { ROLL_CONTROL_DEAD_ZONE_DEFAULT };
float _hmdRollControlRate { ROLL_CONTROL_RATE_DEFAULT };
std::atomic<bool> _hasScriptedBlendShapes { false };
bool _hasAudioEnabledFaceMovement { true };
// working copy -- see AvatarData for thread-safe _sensorToWorldMatrixCache, used for outward facing access
glm::mat4 _sensorToWorldMatrix { glm::mat4() };

View file

@ -54,7 +54,7 @@ void MyHead::simulate(float deltaTime) {
_blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();
}
}
auto eyeTracker = DependencyManager::get<EyeTracker>();
_isEyeTrackerConnected = eyeTracker->isTracking();
// if eye tracker is connected we should get the data here.

View file

@ -45,13 +45,6 @@ void Head::reset() {
void Head::simulate(float deltaTime) {
const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
//qCDebug(avatars_renderer) << "name " << _owningAvatar->getName();
//if (_owningAvatar->isMyAvatar()) {
// qCDebug(avatars_renderer) << "my avatar";
//} else {
// qCDebug(avatars_renderer) << "not my avatar " << _owningAvatar->getAudioLoudness();
//}
// grab the audio loudness from the owning avatar, if we have one
float audioLoudness = _owningAvatar ? _owningAvatar->getAudioLoudness() : 0.0f;
@ -84,9 +77,7 @@ void Head::simulate(float deltaTime) {
} else {
_saccade = glm::vec3();
}
const float BLINK_SPEED = 10.0f;
const float BLINK_SPEED_VARIABILITY = 1.0f;
const float BLINK_START_VARIABILITY = 0.25f;
@ -162,7 +153,7 @@ void Head::simulate(float deltaTime) {
_mouth4 = 0.0f;
_mouthTime = 0.0f;
}
FaceTracker::updateFakeCoefficients(_leftEyeBlink,
_rightEyeBlink,
_browAudioLift,
@ -171,11 +162,11 @@ void Head::simulate(float deltaTime) {
_mouth3,
_mouth4,
_transientBlendshapeCoefficients);
if (getHasProceduralEyeFaceMovement()) {
applyEyelidOffset(getOrientation());
}
_leftEyePosition = _rightEyePosition = getPosition();
if (_owningAvatar) {
auto skeletonModel = static_cast<Avatar*>(_owningAvatar)->getSkeletonModel();

View file

@ -521,8 +521,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
auto startSection = destinationBuffer;
auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);
const auto& blendshapeCoefficients = _headData->getBlendshapeCoefficients();
//note: we don't use the blink and average loudness, we just use the numBlendShapes and
// note: we don't use the blink and average loudness, we just use the numBlendShapes and
// compute the procedural info on the client side.
faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink;
@ -1068,7 +1067,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
} else {
qCWarning(avatars) << "name " << getName() << "key state flag is false";
}
}
int numBytesRead = sourceBuffer - startSection;
@ -1121,7 +1119,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
}
if (hasFaceTrackerInfo) {
//qCWarning(avatars) << "parsing face tracker info ";
auto startSection = sourceBuffer;
PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo));
@ -1129,12 +1126,10 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients;
const int coefficientsSize = sizeof(float) * numCoefficients;
sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);
PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize);
_headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy!
_headData->_transientBlendshapeCoefficients.resize(numCoefficients);
//only copy the blendshapes to headData not the procedural face info
//only copy the blendshapes to headData, not the procedural face info
memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize);
sourceBuffer += coefficientsSize;

View file

@ -101,8 +101,6 @@ protected:
glm::vec3 _lookAtPosition;
quint64 _lookAtPositionChanged { 0 };
//std::atomic<bool> _hasProceduralBlinkFaceMovement{ true };
//std::atomic<bool> _hasProceduralEyeFaceMovement{ true };
bool _hasAudioEnabledFaceMovement { true };
bool _hasProceduralBlinkFaceMovement{ true };
bool _hasProceduralEyeFaceMovement{ true };

View file

@ -1,139 +0,0 @@
name = DefaultStylizedFemale_Clothed
type = body+head
scale = 1
filename = DefaultStylizedFemale_Clothed/DefaultStylizedFemale_Clothed.fbx
texdir = DefaultStylizedFemale_Clothed/textures
joint = jointLean = Spine
joint = jointRightHand = RightHand
joint = jointEyeLeft = LeftEye
joint = jointHead = HeadTop_End
joint = jointNeck = Neck
joint = jointRoot = Hips
joint = jointEyeRight = RightEye
joint = jointLeftHand = LeftHand
freeJoint = LeftArm
freeJoint = LeftForeArm
freeJoint = RightArm
freeJoint = RightForeArm
bs = EyeBlink_L = Blink_Left = 1
bs = Sneer = Squint_Right = 0.5
bs = Sneer = Squint_Left = 0.5
bs = Sneer = NoseScrunch_Right = 0.75
bs = Sneer = NoseScrunch_Left = 0.75
bs = ChinLowerRaise = Jaw_Up = 1
bs = EyeSquint_R = Squint_Right = 1
bs = MouthSmile_R = Smile_Right = 1
bs = ChinUpperRaise = UpperLipUp_Right = 0.5
bs = ChinUpperRaise = UpperLipUp_Left = 0.5
bs = LipsLowerOpen = LowerLipOut = 1
bs = LipsLowerDown = LowerLipDown_Right = 0.7
bs = LipsLowerDown = LowerLipDown_Left = 0.7
bs = BrowsU_L = BrowsUp_Left = 1
bs = MouthRight = Midmouth_Right = 1
bs = MouthDimple_R = Smile_Right = 0.25
bs = LipsPucker = MouthNarrow_Right = 1
bs = LipsPucker = MouthNarrow_Left = 1
bs = Puff = CheekPuff_Right = 1
bs = Puff = CheekPuff_Left = 1
bs = JawFwd = JawForeward = 1
bs = BrowsD_L = BrowsDown_Left = 1
bs = LipsFunnel = TongueUp = 1
bs = LipsFunnel = MouthWhistle_NarrowAdjust_Right = 0.5
bs = LipsFunnel = MouthWhistle_NarrowAdjust_Left = 0.5
bs = LipsFunnel = MouthNarrow_Right = 1
bs = LipsFunnel = MouthNarrow_Left = 1
bs = LipsFunnel = Jaw_Down = 0.36
bs = LipsFunnel = JawForeward = 0.39
bs = LipsUpperOpen = UpperLipOut = 1
bs = EyeSquint_L = Squint_Left = 1
bs = MouthDimple_L = Smile_Left = 0.25
bs = LipsLowerClose = LowerLipIn = 1
bs = MouthFrown_R = Frown_Right = 1
bs = MouthFrown_L = Frown_Left = 1
bs = BrowsU_R = BrowsUp_Right = 1
bs = JawOpen = MouthOpen = 0.7
bs = JawRight = Jaw_Right = 1
bs = MouthLeft = Midmouth_Left = 1
bs = BrowsU_C = BrowsUp_Right = 1
bs = BrowsU_C = BrowsUp_Left = 1
bs = LipsUpperUp = UpperLipUp_Right = 0.7
bs = LipsUpperUp = UpperLipUp_Left = 0.7
bs = EyeBlink_R = Blink_Right = 1
bs = EyeOpen_R = EyesWide_Right = 1
bs = LipsUpperClose = UpperLipIn = 1
bs = MouthSmile_L = Smile_Left = 1
bs = EyeOpen_L = EyesWide_Left = 1
bs = JawLeft = JawRotateY_Left = 0.5
bs = BrowsD_R = BrowsDown_Right = 1
jointIndex = RightHandThumb4 = 21
jointIndex = Neck = 62
jointIndex = LeftHandIndex4 = 57
jointIndex = Body = 71
jointIndex = LeftHandIndex1 = 54
jointIndex = RightHand = 17
jointIndex = RightHandMiddle1 = 26
jointIndex = Spine = 11
jointIndex = RightHandRing2 = 31
jointIndex = RightArm = 15
jointIndex = RightHandPinky2 = 35
jointIndex = LeftToeBase = 9
jointIndex = RightHandIndex3 = 24
jointIndex = RightHandRing1 = 30
jointIndex = RightHandPinky1 = 34
jointIndex = RightEye = 66
jointIndex = LeftHandRing4 = 49
jointIndex = LeftHandRing2 = 47
jointIndex = RightHandMiddle2 = 27
jointIndex = Head = 63
jointIndex = LeftHandMiddle4 = 53
jointIndex = LeftLeg = 7
jointIndex = LeftHandPinky2 = 43
jointIndex = LeftHandThumb1 = 58
jointIndex = LeftHandPinky4 = 45
jointIndex = RightHandIndex1 = 22
jointIndex = Tops = 67
jointIndex = Hips = 0
jointIndex = LeftUpLeg = 6
jointIndex = RightShoulder = 14
jointIndex = Spine2 = 13
jointIndex = RightHandRing4 = 33
jointIndex = RightHandThumb3 = 20
jointIndex = RightHandIndex4 = 25
jointIndex = LeftFoot = 8
jointIndex = LeftHandRing3 = 48
jointIndex = LeftHand = 41
jointIndex = LeftForeArm = 40
jointIndex = LeftToe_End = 10
jointIndex = Bottoms = 68
jointIndex = RightFoot = 3
jointIndex = LeftHandMiddle2 = 51
jointIndex = LeftHandThumb3 = 60
jointIndex = RightHandPinky3 = 36
jointIndex = LeftEye = 65
jointIndex = LeftHandIndex2 = 55
jointIndex = RightHandIndex2 = 23
jointIndex = LeftHandPinky1 = 42
jointIndex = LeftHandMiddle3 = 52
jointIndex = RightHandMiddle4 = 29
jointIndex = LeftHandThumb2 = 59
jointIndex = Shoes = 69
jointIndex = RightHandThumb1 = 18
jointIndex = RightToe_End = 5
jointIndex = RightHandThumb2 = 19
jointIndex = RightUpLeg = 1
jointIndex = RightLeg = 2
jointIndex = LeftHandMiddle1 = 50
jointIndex = LeftHandIndex3 = 56
jointIndex = LeftHandThumb4 = 61
jointIndex = RightHandRing3 = 32
jointIndex = Hair = 70
jointIndex = Spine1 = 12
jointIndex = LeftHandRing1 = 46
jointIndex = LeftArm = 39
jointIndex = LeftShoulder = 38
jointIndex = RightForeArm = 16
jointIndex = HeadTop_End = 64
jointIndex = RightHandPinky4 = 37
jointIndex = LeftHandPinky3 = 44
jointIndex = RightToeBase = 4
jointIndex = RightHandMiddle3 = 28

View file

@ -1,7 +1,7 @@
//
// facialExpressions.js
// A script to set different emotions using blend shapes
//
//
// Author: Elisa Lupin-Jimenez
// Copyright High Fidelity 2018
//
@ -286,7 +286,7 @@
}
}
for (var blendshape in emotion) {
MyAvatar.setBlendshape(blendshape,
MyAvatar.setBlendshape(blendshape,
mixValue(lastEmotionUsed[blendshape], emotion[blendshape], changingEmotionPercentage));
}
});