revert to old parsing of AvatarData

Stephen Birarda 2014-01-31 10:34:11 -08:00
parent 79c8f91c78
commit 29abd2500f


@@ -172,105 +172,102 @@ int AvatarData::parseData(const QByteArray& packet) {
         _handData = new HandData(this);
     }
 
-    QDataStream packetStream(packet);
-    packetStream.skipRawData(numBytesForPacketHeader(packet));
-
-    packetStream.readRawData(reinterpret_cast<char*>(&_position), sizeof(_position));
+    // increment to push past the packet header
+    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(packet.data());
+    const unsigned char* sourceBuffer = startPosition + numBytesForPacketHeader(packet);
+
+    // Body world position
+    memcpy(&_position, sourceBuffer, sizeof(float) * 3);
+    sourceBuffer += sizeof(float) * 3;
 
     // Body rotation (NOTE: This needs to become a quaternion to save two bytes)
-    uint16_t twoByteHolder;
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &_bodyYaw);
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &_bodyPitch);
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &_bodyRoll);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_bodyYaw);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_bodyPitch);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &_bodyRoll);
 
-    // body scale
-    packetStream >> twoByteHolder;
-    unpackFloatRatioFromTwoByte(reinterpret_cast<const unsigned char*>(&twoByteHolder), _targetScale);
+    // Body scale
+    sourceBuffer += unpackFloatRatioFromTwoByte(sourceBuffer, _targetScale);
 
     // Head rotation (NOTE: This needs to become a quaternion to save two bytes)
     float headYaw, headPitch, headRoll;
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &headYaw);
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &headPitch);
-    packetStream >> twoByteHolder;
-    unpackFloatAngleFromTwoByte(&twoByteHolder, &headRoll);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headYaw);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headPitch);
+    sourceBuffer += unpackFloatAngleFromTwoByte((uint16_t*) sourceBuffer, &headRoll);
 
     _headData->setYaw(headYaw);
     _headData->setPitch(headPitch);
     _headData->setRoll(headRoll);
 
     // Head position relative to pelvis
-    packetStream >> _headData->_leanSideways;
-    packetStream >> _headData->_leanForward;
+    memcpy(&_headData->_leanSideways, sourceBuffer, sizeof(_headData->_leanSideways));
+    sourceBuffer += sizeof(float);
+
+    memcpy(&_headData->_leanForward, sourceBuffer, sizeof(_headData->_leanForward));
+    sourceBuffer += sizeof(_headData->_leanForward);
 
     // Hand Position - is relative to body position
     glm::vec3 handPositionRelative;
-    packetStream.readRawData(reinterpret_cast<char*>(&handPositionRelative), sizeof(handPositionRelative));
+    memcpy(&handPositionRelative, sourceBuffer, sizeof(float) * 3);
     _handPosition = _position + handPositionRelative;
+    sourceBuffer += sizeof(float) * 3;
 
-    packetStream.readRawData(reinterpret_cast<char*>(&_headData->_lookAtPosition), sizeof(_headData->_lookAtPosition));
+    // Lookat Position
+    memcpy(&_headData->_lookAtPosition, sourceBuffer, sizeof(_headData->_lookAtPosition));
+    sourceBuffer += sizeof(_headData->_lookAtPosition);
 
     // Instantaneous audio loudness (used to drive facial animation)
     //sourceBuffer += unpackFloatFromByte(sourceBuffer, _audioLoudness, MAX_AUDIO_LOUDNESS);
-    packetStream >> _headData->_audioLoudness;
+    memcpy(&_headData->_audioLoudness, sourceBuffer, sizeof(float));
+    sourceBuffer += sizeof(float);
 
     // the rest is a chat message
-    quint8 chatMessageSize;
-    packetStream >> chatMessageSize;
-    _chatMessage = string(packet.data() + packetStream.device()->pos(), chatMessageSize);
-    packetStream.skipRawData(chatMessageSize);
+    int chatMessageSize = *sourceBuffer++;
+    _chatMessage = string((char*)sourceBuffer, chatMessageSize);
+    sourceBuffer += chatMessageSize * sizeof(char);
 
     // voxel sending features...
     unsigned char bitItems = 0;
-    packetStream >> bitItems;
+    bitItems = (unsigned char)*sourceBuffer++;
 
     // key state, stored as a semi-nibble in the bitItems
     _keyState = (KeyState)getSemiNibbleAt(bitItems,KEY_STATE_START_BIT);
 
     // hand state, stored as a semi-nibble in the bitItems
     _handState = getSemiNibbleAt(bitItems,HAND_STATE_START_BIT);
 
     _headData->_isFaceshiftConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED);
     _isChatCirclingEnabled = oneAtBit(bitItems, IS_CHAT_CIRCLING_ENABLED);
 
     // If it is connected, pack up the data
     if (_headData->_isFaceshiftConnected) {
-        packetStream >> _headData->_leftEyeBlink;
-        packetStream >> _headData->_rightEyeBlink;
-        packetStream >> _headData->_averageLoudness;
-        packetStream >> _headData->_browAudioLift;
-
-        quint8 numBlendshapeCoefficients;
-        packetStream >> numBlendshapeCoefficients;
-        _headData->_blendshapeCoefficients.resize(numBlendshapeCoefficients);
-        packetStream.readRawData(reinterpret_cast<char*>(_headData->_blendshapeCoefficients.data()),
-            numBlendshapeCoefficients * sizeof(float));
+        memcpy(&_headData->_leftEyeBlink, sourceBuffer, sizeof(float));
+        sourceBuffer += sizeof(float);
+
+        memcpy(&_headData->_rightEyeBlink, sourceBuffer, sizeof(float));
+        sourceBuffer += sizeof(float);
+
+        memcpy(&_headData->_averageLoudness, sourceBuffer, sizeof(float));
+        sourceBuffer += sizeof(float);
+
+        memcpy(&_headData->_browAudioLift, sourceBuffer, sizeof(float));
+        sourceBuffer += sizeof(float);
+
+        _headData->_blendshapeCoefficients.resize(*sourceBuffer++);
+        memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer,
+            _headData->_blendshapeCoefficients.size() * sizeof(float));
+        sourceBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float);
     }
 
     // pupil dilation
-    quint8 pupilByte;
-    packetStream >> pupilByte;
-    unpackFloatFromByte(&pupilByte, _headData->_pupilDilation, 1.0f);
+    sourceBuffer += unpackFloatFromByte(sourceBuffer, _headData->_pupilDilation, 1.0f);
 
     // leap hand data
-    if (packetStream.device()->pos() < packet.size()) {
+    if (sourceBuffer - startPosition < packet.size()) {
         // check passed, bytes match
-        packetStream.skipRawData(_handData->decodeRemoteData(packet.mid(packetStream.device()->pos())));
+        sourceBuffer += _handData->decodeRemoteData(packet.mid(sourceBuffer - startPosition));
     }
 
-    return packetStream.device()->pos();
+    return sourceBuffer - startPosition;
 }
 
 void AvatarData::setClampedTargetScale(float targetScale) {
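
Worth noting about the QDataStream version being removed here: QDataStream defaults to big-endian byte order, and with recent stream versions it serializes float through 8-byte doubles, while this wire format is raw single-precision floats written with memcpy (native-endian, so little-endian on the typical x86 hosts involved). A stream-based reader would have to opt out of both defaults to parse it. A minimal hypothetical sketch, not part of this commit:

    #include <QtCore/QByteArray>
    #include <QtCore/QDataStream>

    // Hypothetical reader setup: align a QDataStream with a wire format that
    // was produced by memcpy'ing native little-endian 4-byte floats.
    float readOneFloat(const QByteArray& packet, int headerBytes) {
        QDataStream packetStream(packet);
        packetStream.setByteOrder(QDataStream::LittleEndian);                  // default is BigEndian
        packetStream.setFloatingPointPrecision(QDataStream::SinglePrecision); // default streams 8-byte doubles
        packetStream.skipRawData(headerBytes);
        float value;
        packetStream >> value; // now consumes 4 little-endian bytes
        return value;
    }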
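
The two-byte angle helpers used in the restored code are defined elsewhere (in SharedUtil). As a rough sketch of the quantization they imply, assuming the packed value maps [-180, 180) degrees onto the full uint16_t range (constants illustrative; the return value is the byte count so callers can advance their cursor):

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    // Sketch: quantize a degree angle into a uint16_t and back. Both functions
    // return the number of bytes produced/consumed, matching the
    // sourceBuffer += unpackFloatAngleFromTwoByte(...) pattern above.
    int packFloatAngleToTwoByte(unsigned char* buffer, float degrees) {
        const float RATIO = std::numeric_limits<uint16_t>::max() / 360.0f;
        uint16_t holder = (uint16_t) std::floor((degrees + 180.0f) * RATIO);
        memcpy(buffer, &holder, sizeof(holder));
        return sizeof(holder);
    }

    int unpackFloatAngleFromTwoByte(const uint16_t* buffer, float* destination) {
        *destination = (*buffer / (float) std::numeric_limits<uint16_t>::max()) * 360.0f - 180.0f;
        return sizeof(uint16_t);
    }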
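
Likewise for the flag-byte helpers: getSemiNibbleAt pulls a 2-bit field and oneAtBit a single bit out of bitItems. A plausible sketch, assuming bit indices count from the most significant bit (as the *_START_BIT constant names suggest):

    // Sketch: bit index 0 is the most significant bit of the byte.
    int getSemiNibbleAt(unsigned char byte, int bitIndex) {
        return (byte >> (6 - bitIndex)) & 3; // a semi-nibble is two bits: 00, 01, 10 or 11
    }

    bool oneAtBit(unsigned char byte, int bitIndex) {
        return ((byte >> (7 - bitIndex)) & 1);
    }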