Mirror of https://github.com/overte-org/overte.git (synced 2025-04-22 02:24:58 +02:00)
fixed oneAtBit16/setAtBit16 functions to return the correct result for additional flags
This commit is contained in:
parent
0c879d85b5
commit
dca93ca61f
3 changed files with 24 additions and 5 deletions
libraries
@@ -45,6 +45,13 @@ void Head::reset() {
 void Head::simulate(float deltaTime) {
     const float NORMAL_HZ = 60.0f; // the update rate the constant values were tuned for
 
+    qCDebug(avatars_renderer) << "name " << _owningAvatar->getName();
+    if (_owningAvatar->isMyAvatar()) {
+        qCDebug(avatars_renderer) << "my avatar";
+    } else {
+        qCDebug(avatars_renderer) << "not my avatar " << _owningAvatar->getAudioLoudness();
+    }
+
     // grab the audio loudness from the owning avatar, if we have one
     float audioLoudness = _owningAvatar ? _owningAvatar->getAudioLoudness() : 0.0f;
 
@@ -78,6 +85,7 @@ void Head::simulate(float deltaTime) {
         _saccade = glm::vec3();
     }
 
+
     const float BLINK_SPEED = 10.0f;
     const float BLINK_SPEED_VARIABILITY = 1.0f;
 
@@ -85,7 +93,7 @@ void Head::simulate(float deltaTime) {
     const float FULLY_OPEN = 0.0f;
     const float FULLY_CLOSED = 1.0f;
     if (getHasProceduralBlinkFaceMovement()) {
-        qCDebug(avatars_renderer) << "in the blink code";
+        qCDebug(avatars_renderer) << "in the blink code " << _owningAvatar->getName();
         // Detect transition from talking to not; force blink after that and a delay
         bool forceBlink = false;
         const float TALKING_LOUDNESS = 100.0f;
@@ -135,7 +143,7 @@ void Head::simulate(float deltaTime) {
 
     // use data to update fake Faceshift blendshape coefficients
     if (getHasAudioEnabledFaceMovement()) {
-        qCDebug(avatars_renderer) << "in the audio face code";
+        //qCDebug(avatars_renderer) << "in the audio face code";
         // Update audio attack data for facial animation (eyebrows and mouth)
         float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f; // --> 0.9 at 60 Hz
         _audioAttack = audioAttackAveragingRate * _audioAttack +
@@ -167,7 +175,7 @@ void Head::simulate(float deltaTime) {
         _transientBlendshapeCoefficients);
 
     if (getHasProceduralEyeFaceMovement()) {
-        qCDebug(avatars_renderer) << "in the eye face code";
+        //qCDebug(avatars_renderer) << "in the eye face code";
         applyEyelidOffset(getOrientation());
     }
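A note on the averaging line in the @@ -135,7 +143,7 @@ hunk above: audioAttackAveragingRate is a frame-rate-compensated smoothing factor, tuned so it evaluates to 0.9 at the 60 Hz rate named by NORMAL_HZ. The snippet below is a minimal standalone sketch of that arithmetic, not the repository code; the (1 - rate) blend of a new sample is an assumption here, since the right-hand side of the _audioAttack assignment is truncated in this view.

// Minimal sketch of the frame-rate-compensated smoothing used for the audio
// "attack" value; the new-sample term is assumed, since the diff view cuts
// off the right-hand side of the _audioAttack assignment.
#include <cstdio>

int main() {
    const float NORMAL_HZ = 60.0f;       // the update rate the constants were tuned for
    const float frameTimes[] = { 1.0f / 60.0f, 1.0f / 30.0f };
    float audioAttack = 0.0f;
    const float newSample = 1.0f;        // stand-in for the per-frame loudness-derived input

    for (float deltaTime : frameTimes) {
        // At 60 Hz: (10 - 1) / 10 = 0.9; at 30 Hz: (10 - 2) / 10 = 0.8,
        // so longer frames give the new sample proportionally more weight.
        float audioAttackAveragingRate = (10.0f - deltaTime * NORMAL_HZ) / 10.0f;
        audioAttack = audioAttackAveragingRate * audioAttack +
                      (1.0f - audioAttackAveragingRate) * newSample;
        std::printf("dt = %.4f  rate = %.2f  attack = %.3f\n",
                    deltaTime, audioAttackAveragingRate, audioAttack);
    }
    return 0;
}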
@@ -522,12 +522,17 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
         auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);
         const auto& blendshapeCoefficients = _headData->getBlendshapeCoefficients();
 
+        //for (int i = 0; i < blendshapeCoefficients.size(); i++) {
+        //    qCWarning(avatars) << "blend coeff " << i << " " << blendshapeCoefficients[i];
+        //}
+
         faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
         faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink;
         faceTrackerInfo->averageLoudness = _headData->_averageLoudness;
         faceTrackerInfo->browAudioLift = _headData->_browAudioLift;
         faceTrackerInfo->numBlendshapeCoefficients = blendshapeCoefficients.size();
         destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);
+        qCWarning(avatars) << "face tracker info left eye blink " << faceTrackerInfo->leftEyeBlink;
 
         memcpy(destinationBuffer, blendshapeCoefficients.data(), blendshapeCoefficients.size() * sizeof(float));
         destinationBuffer += blendshapeCoefficients.size() * sizeof(float);
@@ -1009,6 +1014,11 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
         auto newHasProceduralEyeFaceMovement = oneAtBit16(bitItems, PROCEDURAL_EYE_FACE_MOVEMENT);
         auto newHasProceduralBlinkFaceMovement = oneAtBit16(bitItems, PROCEDURAL_BLINK_FACE_MOVEMENT);
 
+        if (newHasAudioEnabledFaceMovement) {
+            qCWarning(avatars) << "name " << getName() << "audio enabled flag is true";
+        } else {
+            qCWarning(avatars) << "name " << getName() << "audio enabled flag is false";
+        }
         bool keyStateChanged = (_keyState != newKeyState);
         bool handStateChanged = (_handState != newHandState);
         bool faceStateChanged = (_headData->_isFaceTrackerConnected != newFaceTrackerConnected);
@@ -1086,6 +1096,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
         PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo));
         auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer);
         sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);
+        qCWarning(avatars) << "parse data left eye blink " << faceTrackerInfo->leftEyeBlink;
 
         _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink;
         _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink;
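For context on the @@ -522 hunk above: the face tracker data is serialized as a fixed-size header struct followed immediately by a variable-length array of blendshape coefficients, with destinationBuffer advanced past each piece; the @@ -1086 hunk reads it back in the same order. Below is a minimal standalone sketch of that packing pattern. The struct is a simplified stand-in, since the real field types and packing of AvatarDataPacket::FaceTrackerInfo are not visible in this view.

// Sketch of the header-plus-payload packing used for face tracker data.
// FaceTrackerInfoSketch and packFaceTrackerInfo are illustrative names,
// not the repository API.
#include <cstddef>
#include <cstring>
#include <vector>

struct FaceTrackerInfoSketch {
    float leftEyeBlink;
    float rightEyeBlink;
    float averageLoudness;
    float browAudioLift;
    unsigned char numBlendshapeCoefficients;
};

// Writes the fixed-size header, then the coefficient array, advancing the
// destination pointer past each piece; returns the total bytes written.
std::size_t packFaceTrackerInfo(unsigned char* destinationBuffer,
                                float leftEyeBlink, float rightEyeBlink,
                                float averageLoudness, float browAudioLift,
                                const std::vector<float>& blendshapeCoefficients) {
    unsigned char* start = destinationBuffer;

    auto* faceTrackerInfo = reinterpret_cast<FaceTrackerInfoSketch*>(destinationBuffer);
    faceTrackerInfo->leftEyeBlink = leftEyeBlink;
    faceTrackerInfo->rightEyeBlink = rightEyeBlink;
    faceTrackerInfo->averageLoudness = averageLoudness;
    faceTrackerInfo->browAudioLift = browAudioLift;
    faceTrackerInfo->numBlendshapeCoefficients =
        static_cast<unsigned char>(blendshapeCoefficients.size());
    destinationBuffer += sizeof(FaceTrackerInfoSketch);

    // The coefficient payload follows the header directly; a reader recovers its
    // length from numBlendshapeCoefficients and advances its pointer the same way.
    std::memcpy(destinationBuffer, blendshapeCoefficients.data(),
                blendshapeCoefficients.size() * sizeof(float));
    destinationBuffer += blendshapeCoefficients.size() * sizeof(float);

    return static_cast<std::size_t>(destinationBuffer - start);
}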
@@ -298,11 +298,11 @@ void setAtBit(unsigned char& byte, int bitIndex) {
 }
 
 bool oneAtBit16(unsigned short word, int bitIndex) {
-    return (word >> (7 - bitIndex) & 1);
+    return (word >> (16 - bitIndex) & 1);
 }
 
 void setAtBit16(unsigned short& word, int bitIndex) {
-    word |= (1 << (7 - bitIndex));
+    word |= (1 << (16 - bitIndex));
 }
 
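The two helpers above index flags from the most significant end of the word, mirroring the 8-bit setAtBit/oneAtBit pair whose (7 - bitIndex) shift they previously used; that shift goes negative for flag indices above 7, which is why the additional face-movement flags were read back incorrectly. Below is a minimal standalone sketch, not the repository code, of MSB-first packing in a 16-bit word using a (15 - bitIndex) shift, which covers indices 0 through 15; the flag names and their index values are illustrative stand-ins for the AvatarData flag constants.

// Standalone sketch of MSB-first bit-flag helpers for a 16-bit word.
// Names ending in "Sketch" and the flag index values are illustrative,
// not the repository's API or constants.
#include <cassert>

bool oneAtBit16Sketch(unsigned short word, int bitIndex) {
    // bitIndex 0 is the most significant bit, bitIndex 15 the least significant
    return (word >> (15 - bitIndex)) & 1;
}

void setAtBit16Sketch(unsigned short& word, int bitIndex) {
    word |= (1 << (15 - bitIndex));
}

int main() {
    unsigned short flags = 0;

    // Flag positions modeled loosely on the AvatarData packing; values assumed.
    const int KEY_STATE_START_BIT = 0;
    const int AUDIO_ENABLED_FACE_MOVEMENT = 8;
    const int PROCEDURAL_BLINK_FACE_MOVEMENT = 10;

    setAtBit16Sketch(flags, KEY_STATE_START_BIT);
    setAtBit16Sketch(flags, AUDIO_ENABLED_FACE_MOVEMENT);
    setAtBit16Sketch(flags, PROCEDURAL_BLINK_FACE_MOVEMENT);

    // Every flag set above reads back as set, including the ones past index 7
    // that an 8-bit style (7 - bitIndex) shift cannot reach.
    assert(oneAtBit16Sketch(flags, KEY_STATE_START_BIT));
    assert(oneAtBit16Sketch(flags, AUDIO_ENABLED_FACE_MOVEMENT));
    assert(oneAtBit16Sketch(flags, PROCEDURAL_BLINK_FACE_MOVEMENT));
    assert(!oneAtBit16Sketch(flags, 5));
    return 0;
}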