hacking on new format
parent 977cda3d2e, commit 02a6060b5e
4 changed files with 903 additions and 68 deletions
@@ -52,6 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData";
static const int TRANSLATION_COMPRESSION_RADIX = 12;
static const int SENSOR_TO_WORLD_SCALE_RADIX = 10;
static const int AUDIO_LOUDNESS_RADIX = 2;
static const int MODEL_OFFSET_RADIX = 6;

#define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0)

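These radix constants drive a simple fixed-point scheme: a float is scaled by 2^radix and truncated into a signed 16-bit integer, trading range for resolution. A minimal sketch of that convention (the helper names below are illustrative; the tree's real pack/unpack helpers live in the shared byte-packing utilities):

#include <cstdint>
#include <cstring>

// Pack a float as signed 16-bit fixed point with 'radix' fractional bits.
// With radix 12 the representable range is roughly [-8, 8) in ~1/4096 steps.
static int packFixedPoint(uint8_t* dest, float value, int radix) {
    int16_t fixedValue = (int16_t)(value * (float)(1 << radix));
    memcpy(dest, &fixedValue, sizeof(fixedValue));
    return sizeof(fixedValue);
}

static int unpackFixedPoint(const uint8_t* src, float* value, int radix) {
    int16_t fixedValue;
    memcpy(&fixedValue, src, sizeof(fixedValue));
    *value = (float)fixedValue / (float)(1 << radix);
    return sizeof(fixedValue);
}

That is why TRANSLATION_COMPRESSION_RADIX = 12 suits joint translations (small magnitudes, fine detail) while AUDIO_LOUDNESS_RADIX = 2 gives loudness a much larger range at coarse resolution.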
@@ -77,10 +78,21 @@ AvatarData::AvatarData() :
    setBodyRoll(0.0f);

    ASSERT(sizeof(AvatarDataPacket::Header) == AvatarDataPacket::HEADER_SIZE);
    ASSERT(sizeof(AvatarDataPacket::MinimalAvatarInfo) == AvatarDataPacket::MINIMAL_AVATAR_INFO_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarGlobalPosition) == AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarLocalPosition) == AvatarDataPacket::AVATAR_LOCAL_POSITION_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarDimensions) == AvatarDataPacket::AVATAR_DIMENSIONS_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarOrientation) == AvatarDataPacket::AVATAR_ORIENTATION_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AvatarScale) == AvatarDataPacket::AVATAR_SCALE_SIZE);
    ASSERT(sizeof(AvatarDataPacket::LookAtPosition) == AvatarDataPacket::LOOK_AT_POSITION_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AudioLoudness) == AvatarDataPacket::AUDIO_LOUDNESS_SIZE);
    ASSERT(sizeof(AvatarDataPacket::SensorToWorldMatrix) == AvatarDataPacket::SENSOR_TO_WORLD_SIZE);
    ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE);
    ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE);
    ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE);

    // Old format...
    ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE);

}

AvatarData::~AvatarData() {

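The sizeof() asserts above only hold if the wire structs are byte-packed; any compiler-inserted padding would silently shift every later field on the wire. A minimal stand-in showing the idea (PACKED_BEGIN/PACKED_END in the real tree wrap compiler-specific packing attributes; the pragma form below is an assumption for illustration):

#include <cstdint>

#pragma pack(push, 1)
struct ParentInfoWire {
    uint8_t parentUUID[16];     // rfc 4122 encoded
    uint16_t parentJointIndex;
};
#pragma pack(pop)

// Without packing, most ABIs would pad this struct to 20 bytes; the wire
// format requires exactly 18 (PARENT_INFO_SIZE).
static_assert(sizeof(ParentInfoWire) == 18, "wire structs must be byte-packed");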
@@ -136,8 +148,11 @@ void AvatarData::setHandPosition(const glm::vec3& handPosition) {
    _handPosition = glm::inverse(getOrientation()) * (handPosition - getPosition());
}


QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) {
    return toByteArray_NEW(dataDetail);
}

QByteArray AvatarData::toByteArray_OLD(AvatarDataDetail dataDetail) {
    bool cullSmallChanges = (dataDetail == CullSmallData);
    bool sendAll = (dataDetail == SendAllData);
    bool sendMinimum = (dataDetail == MinimumData);

@@ -408,6 +423,398 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) {
    return avatarDataByteArray.left(destinationBuffer - startPosition);
}

void AvatarData::lazyInitHeadData() {
    // lazily allocate memory for HeadData in case we're not an Avatar instance
    if (!_headData) {
        _headData = new HeadData(this);
    }
    if (_forceFaceTrackerConnected) {
        _headData->_isFaceTrackerConnected = true;
    }
}


bool AvatarData::avatarLocalPositionChanged() {
    return _lastSentLocalPosition != getLocalPosition();
}

bool AvatarData::avatarDimensionsChanged() {
    auto avatarDimensions = getPosition() - _globalBoundingBoxCorner;
    return _lastSentAvatarDimensions != avatarDimensions;
}

bool AvatarData::avatarOrientationChanged() {
    return _lastSentLocalOrientation != getLocalOrientation();
}

bool AvatarData::avatarScaleChanged() {
    return _lastSentScale != getDomainLimitedScale();
}

bool AvatarData::lookAtPositionChanged() {
    return _lastSentLookAt != _headData->_lookAtPosition;
}

bool AvatarData::audioLoudnessChanged() {
    return _lastSentAudioLoudness != glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS);
}

bool AvatarData::sensorToWorldMatrixChanged() {
    return _lastSentSensorToWorldMatrix != getSensorToWorldMatrix();
}

bool AvatarData::additionalFlagsChanged() {
    return true; // FIXME!
}

bool AvatarData::parentInfoChanged() {
    return (_lastSentParentID != getParentID()) || (_lastSentParentJointIndex != _parentJointIndex);
}

bool AvatarData::faceTrackerInfoChanged() {
    return true; // FIXME!
}

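toByteArray_NEW below leads the payload with a 16-bit HasFlags word and then appends each optional section in a fixed order; the reader walks the same order, consuming a section only when its bit is set. A self-contained sketch of that round trip, using a couple of the flag values defined later in AvatarDataPacket:

#include <cassert>
#include <cstdint>

using HasFlags = uint16_t;
const HasFlags HAS_GLOBAL_POSITION = 1U << 0;  // PACKET_HAS_AVATAR_GLOBAL_POSITION
const HasFlags HAS_ORIENTATION     = 1U << 3;  // PACKET_HAS_AVATAR_ORIENTATION

int main() {
    // sender: contribute one bit per included section
    bool sendOrientation = true;
    HasFlags flags = HAS_GLOBAL_POSITION
        | (sendOrientation ? HAS_ORIENTATION : (HasFlags)0);

    // receiver: test each bit before consuming the matching section
    assert((flags & HAS_GLOBAL_POSITION) == HAS_GLOBAL_POSITION);
    assert((flags & HAS_ORIENTATION) == HAS_ORIENTATION);
    return 0;
}

Because sections appear in bit order, an unset bit simply means the section is absent and the reader moves straight on to the next one.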
QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) {
    bool cullSmallChanges = (dataDetail == CullSmallData);
    bool sendAll = (dataDetail == SendAllData);
    bool sendMinimum = (dataDetail == MinimumData);

    // TODO: DRY this up to a shared method
    // that can pack any type given the number of bytes
    // and return the number of bytes to push the pointer
    lazyInitHeadData();

    QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0);
    unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data());
    unsigned char* startPosition = destinationBuffer;
    unsigned char* packetStateFlagsAt = startPosition;

    // pseudo code....
    //   - determine which sections will be included
    //   - create the packet "has" flags
    //   - include each section in order

    // FIXME - things to consider
    //   - how to DRY up this code?
    //
    //   - the sections below are basically little repeats of each other, where they
    //     cast the destination pointer to the section struct type, set the struct
    //     members in some specific way (not just assigning), then advance the buffer,
    //     and then remember the last value sent. This could be macro-ized and/or
    //     templatized or lambda-ized
    //
    //   - also, we could determine the "hasXXX" flags in the little sections,
    //     and then set the actual flag values AFTER the rest are done...
    //
    //   - this toByteArray() side-effects the AvatarData, is that safe? in particular
    //     is it possible we'll call toByteArray() and then NOT actually use the result?

    bool hasAvatarGlobalPosition = true; // always include global position
    bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged();
    bool hasAvatarDimensions = sendAll || avatarDimensionsChanged();
    bool hasAvatarOrientation = sendAll || avatarOrientationChanged();
    bool hasAvatarScale = sendAll || avatarScaleChanged();
    bool hasLookAtPosition = sendAll || lookAtPositionChanged();
    bool hasAudioLoudness = sendAll || audioLoudnessChanged();
    bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChanged();
    bool hasAdditionalFlags = sendAll || additionalFlagsChanged();
    bool hasParentInfo = hasParent() && (sendAll || parentInfoChanged());
    bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChanged());
    bool hasJointData = !sendMinimum;

    // Leading flags, to indicate how much data is actually included in the packet...
    // (note: ?: rather than && here -- "hasX && FLAG" would collapse every flag to 0 or 1)
    AvatarDataPacket::HasFlags packetStateFlags =
          (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0)
        | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0)
        | (hasAvatarDimensions ? AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS : 0)
        | (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0)
        | (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0)
        | (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0)
        | (hasAudioLoudness ? AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS : 0)
        | (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0)
        | (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0)
        | (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0)
        | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0)
        | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0);

    memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags));
    destinationBuffer += sizeof(packetStateFlags);

    if (hasAvatarGlobalPosition) {
        auto data = reinterpret_cast<AvatarDataPacket::AvatarGlobalPosition*>(destinationBuffer);
        data->globalPosition[0] = _globalPosition.x;
        data->globalPosition[1] = _globalPosition.y;
        data->globalPosition[2] = _globalPosition.z;
        destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition);
        _lastSentGlobalPosition = _globalPosition;
    }

    // FIXME - I was told by tony this was "skeletal model position"-- but it seems to be
    // SpatiallyNestable::getLocalPosition() ... which AFAICT is almost always the same as
    // the global position (unless presumably you're on a parent)... we might be able to
    // include this in the parent info record
    if (hasAvatarLocalPosition) {
        auto data = reinterpret_cast<AvatarDataPacket::AvatarLocalPosition*>(destinationBuffer);
        auto localPosition = getLocalPosition();
        data->localPosition[0] = localPosition.x;
        data->localPosition[1] = localPosition.y;
        data->localPosition[2] = localPosition.z;
        destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition);
        _lastSentLocalPosition = localPosition;
    }

    if (hasAvatarDimensions) {
        auto data = reinterpret_cast<AvatarDataPacket::AvatarDimensions*>(destinationBuffer);
        auto avatarDimensions = getPosition() - _globalBoundingBoxCorner;
        data->avatarDimensions[0] = avatarDimensions.x;
        data->avatarDimensions[1] = avatarDimensions.y;
        data->avatarDimensions[2] = avatarDimensions.z;
        destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions);
        _lastSentAvatarDimensions = avatarDimensions;
    }

    if (hasAvatarOrientation) {
        auto data = reinterpret_cast<AvatarDataPacket::AvatarOrientation*>(destinationBuffer);
        auto localOrientation = getLocalOrientation();
        glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(localOrientation));
        packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 0), bodyEulerAngles.y);
        packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x);
        packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z);
        destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation);
        _lastSentLocalOrientation = localOrientation;
    }

    if (hasAvatarScale) {
        auto data = reinterpret_cast<AvatarDataPacket::AvatarScale*>(destinationBuffer);
        auto scale = getDomainLimitedScale();
        packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale);
        destinationBuffer += sizeof(AvatarDataPacket::AvatarScale);
        _lastSentScale = scale;
    }

    if (hasLookAtPosition) {
        auto data = reinterpret_cast<AvatarDataPacket::LookAtPosition*>(destinationBuffer);
        auto lookAt = _headData->_lookAtPosition;
        data->lookAtPosition[0] = lookAt.x;
        data->lookAtPosition[1] = lookAt.y;
        data->lookAtPosition[2] = lookAt.z;
        destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition);
        _lastSentLookAt = lookAt;
    }

    if (hasAudioLoudness) {
        auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer);
        auto audioLoudness = glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS);
        packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX);
        destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness);
        _lastSentAudioLoudness = audioLoudness;
    }

    if (hasSensorToWorldMatrix) {
        auto data = reinterpret_cast<AvatarDataPacket::SensorToWorldMatrix*>(destinationBuffer);
        glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix();
        packOrientationQuatToSixBytes(data->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix));
        glm::vec3 scale = extractScale(sensorToWorldMatrix);
        packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX);
        data->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0];
        data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1];
        data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2];
        destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix);
        _lastSentSensorToWorldMatrix = sensorToWorldMatrix;
    }

    QUuid parentID = getParentID();

    if (hasAdditionalFlags) {
        auto data = reinterpret_cast<AvatarDataPacket::AdditionalFlags*>(destinationBuffer);

        uint8_t flags { 0 };

        setSemiNibbleAt(flags, KEY_STATE_START_BIT, _keyState);

        // hand state
        bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG;
        setSemiNibbleAt(flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG);
        if (isFingerPointing) {
            setAtBit(flags, HAND_STATE_FINGER_POINTING_BIT);
        }
        // faceshift state
        if (_headData->_isFaceTrackerConnected) {
            setAtBit(flags, IS_FACESHIFT_CONNECTED);
        }
        // eye tracker state
        if (_headData->_isEyeTrackerConnected) {
            setAtBit(flags, IS_EYE_TRACKER_CONNECTED);
        }
        // referential state
        if (!parentID.isNull()) {
            setAtBit(flags, HAS_REFERENTIAL);
        }
        data->flags = flags;
        destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags);
        _lastSentAdditionalFlags = flags;
    }

    if (hasParentInfo) {
        auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer);
        QByteArray referentialAsBytes = parentID.toRfc4122();
        memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size());
        parentInfo->parentJointIndex = _parentJointIndex;
        destinationBuffer += sizeof(AvatarDataPacket::ParentInfo);
        _lastSentParentID = parentID;
        _lastSentParentJointIndex = _parentJointIndex;
    }

    // If it is connected, pack up the data
    if (hasFaceTrackerInfo) {
        auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer);

        faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink;
        faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink;
        faceTrackerInfo->averageLoudness = _headData->_averageLoudness;
        faceTrackerInfo->browAudioLift = _headData->_browAudioLift;
        faceTrackerInfo->numBlendshapeCoefficients = _headData->_blendshapeCoefficients.size();
        destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);

        // followed by a variable number of float coefficients
        memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float));
        destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float);
    }

    // pack up the joint data, if requested
    if (hasJointData) {
        QReadLocker readLock(&_jointDataLock);

        // joint rotation data
        *destinationBuffer++ = _jointData.size();
        unsigned char* validityPosition = destinationBuffer;
        unsigned char validity = 0;
        int validityBit = 0;

#ifdef WANT_DEBUG
        int rotationSentCount = 0;
        unsigned char* beforeRotations = destinationBuffer;
#endif

        _lastSentJointData.resize(_jointData.size());

        for (int i = 0; i < _jointData.size(); i++) {
            const JointData& data = _jointData[i];
            if (sendAll || _lastSentJointData[i].rotation != data.rotation) {
                if (sendAll ||
                    !cullSmallChanges ||
                    fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) {
                    if (data.rotationSet) {
                        validity |= (1 << validityBit);
#ifdef WANT_DEBUG
                        rotationSentCount++;
#endif
                    }
                }
            }
            if (++validityBit == BITS_IN_BYTE) {
                *destinationBuffer++ = validity;
                validityBit = validity = 0;
            }
        }
        if (validityBit != 0) {
            *destinationBuffer++ = validity;
        }

        validityBit = 0;
        validity = *validityPosition++;
        for (int i = 0; i < _jointData.size(); i++) {
            const JointData& data = _jointData[i];
            if (validity & (1 << validityBit)) {
                destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation);
            }
            if (++validityBit == BITS_IN_BYTE) {
                validityBit = 0;
                validity = *validityPosition++;
            }
        }


        // joint translation data
        validityPosition = destinationBuffer;
        validity = 0;
        validityBit = 0;

#ifdef WANT_DEBUG
        int translationSentCount = 0;
        unsigned char* beforeTranslations = destinationBuffer;
#endif

        float maxTranslationDimension = 0.0;
        for (int i = 0; i < _jointData.size(); i++) {
            const JointData& data = _jointData[i];
            if (sendAll || _lastSentJointData[i].translation != data.translation) {
                if (sendAll ||
                    !cullSmallChanges ||
                    glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) {
                    if (data.translationSet) {
                        validity |= (1 << validityBit);
#ifdef WANT_DEBUG
                        translationSentCount++;
#endif
                        maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension);
                        maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension);
                        maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension);
                    }
                }
            }
            if (++validityBit == BITS_IN_BYTE) {
                *destinationBuffer++ = validity;
                validityBit = validity = 0;
            }
        }

        if (validityBit != 0) {
            *destinationBuffer++ = validity;
        }

        validityBit = 0;
        validity = *validityPosition++;
        for (int i = 0; i < _jointData.size(); i++) {
            const JointData& data = _jointData[i];
            if (validity & (1 << validityBit)) {
                destinationBuffer +=
                    packFloatVec3ToSignedTwoByteFixed(destinationBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX);
            }
            if (++validityBit == BITS_IN_BYTE) {
                validityBit = 0;
                validity = *validityPosition++;
            }
        }

        // faux joints
        Transform controllerLeftHandTransform = Transform(getControllerLeftHandMatrix());
        destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerLeftHandTransform.getRotation());
        destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerLeftHandTransform.getTranslation(),
                                                               TRANSLATION_COMPRESSION_RADIX);
        Transform controllerRightHandTransform = Transform(getControllerRightHandMatrix());
        destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerRightHandTransform.getRotation());
        destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(),
                                                               TRANSLATION_COMPRESSION_RADIX);

#ifdef WANT_DEBUG
        if (sendAll) {
            qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll
                << "rotations:" << rotationSentCount << "translations:" << translationSentCount
                << "largest:" << maxTranslationDimension
                << "size:"
                << (beforeRotations - startPosition) << "+"
                << (beforeTranslations - beforeRotations) << "+"
                << (destinationBuffer - beforeTranslations) << "="
                << (destinationBuffer - startPosition);
        }
#endif
    }

    return avatarDataByteArray.left(destinationBuffer - startPosition);
}
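The joint section above makes two passes over the same validity mask: pass one writes one bit per joint (flushed every 8, plus a final partial byte), pass two re-reads those bytes to decide which joints actually carry a 6-byte payload. The bit-writing half, extracted into a standalone sketch:

#include <cstdint>
#include <vector>

// Write one validity bit per joint, least-significant bit first, flushing a
// byte every 8 joints and once more if a partial byte remains -- the same
// shape as the rotation and translation loops above.
static uint8_t* writeValidityBits(uint8_t* dest, const std::vector<bool>& valid) {
    uint8_t byte = 0;
    int bit = 0;
    for (bool v : valid) {
        if (v) { byte |= (uint8_t)(1 << bit); }
        if (++bit == 8) { *dest++ = byte; byte = 0; bit = 0; }
    }
    if (bit != 0) { *dest++ = byte; }  // flush the partial final byte
    return dest;
}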

void AvatarData::doneEncoding(bool cullSmallChanges) {
    // The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData.
    QReadLocker readLock(&_jointDataLock);

@@ -473,6 +880,11 @@ const unsigned char* unpackFauxJoint(const unsigned char* sourceBuffer, ThreadSa

// read data in packet starting at byte offset and return number of bytes parsed
int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
    return parseDataFromBuffer_NEW(buffer);
}

// read data in packet starting at byte offset and return number of bytes parsed
int AvatarData::parseDataFromBuffer_OLD(const QByteArray& buffer) {
    // lazily allocate memory for HeadData in case we're not an Avatar instance
    if (!_headData) {
        _headData = new HeadData(this);

@@ -714,6 +1126,300 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {
    return numBytesRead;
}


// read data in packet starting at byte offset and return number of bytes parsed
int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) {
    // lazily allocate memory for HeadData in case we're not an Avatar instance
    lazyInitHeadData();

    AvatarDataPacket::HasFlags packetStateFlags;

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data());
    const unsigned char* endPosition = startPosition + buffer.size();
    const unsigned char* sourceBuffer = startPosition;

    // read the packet flags
    memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags));

#define HAS_FLAG(B,F) ((B & F) == F)

    bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION);
    bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION);
    bool hasAvatarDimensions = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS);
    bool hasAvatarOrientation = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION);
    bool hasAvatarScale = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_SCALE);
    bool hasLookAtPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION);
    bool hasAudioLoudness = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS);
    bool hasSensorToWorldMatrix = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX);
    bool hasAdditionalFlags = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS);
    bool hasParentInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_PARENT_INFO);
    bool hasFaceTrackerInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO);
    bool hasJointData = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_JOINT_DATA);

    sourceBuffer += sizeof(AvatarDataPacket::HasFlags);

    quint64 now = usecTimestampNow();

    if (hasAvatarGlobalPosition) {
        PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition));
        auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer);
        _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition);
    }

    if (hasAvatarLocalPosition) {
        PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition));
        auto data = reinterpret_cast<const AvatarDataPacket::AvatarLocalPosition*>(sourceBuffer);
        glm::vec3 position = glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]);
        if (isNaN(position)) {
            if (shouldLogError(now)) {
                qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID();
            }
            return buffer.size();
        }
        setLocalPosition(position);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition);
    }

    if (hasAvatarDimensions) {
        PACKET_READ_CHECK(AvatarDimensions, sizeof(AvatarDataPacket::AvatarDimensions));
        auto data = reinterpret_cast<const AvatarDataPacket::AvatarDimensions*>(sourceBuffer);

        // FIXME - this is suspicious looking!
        _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions);
    }

    if (hasAvatarOrientation) {
        PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation));
        auto data = reinterpret_cast<const AvatarDataPacket::AvatarOrientation*>(sourceBuffer);
        float pitch, yaw, roll;
        unpackFloatAngleFromTwoByte(data->localOrientation + 0, &yaw);
        unpackFloatAngleFromTwoByte(data->localOrientation + 1, &pitch);
        unpackFloatAngleFromTwoByte(data->localOrientation + 2, &roll);
        if (isNaN(yaw) || isNaN(pitch) || isNaN(roll)) {
            if (shouldLogError(now)) {
                qCWarning(avatars) << "Discard AvatarData packet: localOrientation is NaN, uuid " << getSessionUUID();
            }
            return buffer.size();
        }

        glm::quat currentOrientation = getLocalOrientation();
        glm::vec3 newEulerAngles(pitch, yaw, roll);
        glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles));
        if (currentOrientation != newOrientation) {
            _hasNewJointRotations = true;
            setLocalOrientation(newOrientation);
        }
        sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation);
    }

    if (hasAvatarScale) {
        PACKET_READ_CHECK(AvatarScale, sizeof(AvatarDataPacket::AvatarScale));
        auto data = reinterpret_cast<const AvatarDataPacket::AvatarScale*>(sourceBuffer);
        float scale;
        unpackFloatRatioFromTwoByte((uint8_t*)&data->scale, scale);
        if (isNaN(scale)) {
            if (shouldLogError(now)) {
                qCWarning(avatars) << "Discard AvatarData packet: scale NaN, uuid " << getSessionUUID();
            }
            return buffer.size();
        }
        setTargetScale(scale);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarScale);
    }

    if (hasLookAtPosition) {
        PACKET_READ_CHECK(LookAtPosition, sizeof(AvatarDataPacket::LookAtPosition));
        auto data = reinterpret_cast<const AvatarDataPacket::LookAtPosition*>(sourceBuffer);
        glm::vec3 lookAt = glm::vec3(data->lookAtPosition[0], data->lookAtPosition[1], data->lookAtPosition[2]);
        if (isNaN(lookAt)) {
            if (shouldLogError(now)) {
                qCWarning(avatars) << "Discard AvatarData packet: lookAtPosition is NaN, uuid " << getSessionUUID();
            }
            return buffer.size();
        }
        _headData->_lookAtPosition = lookAt;
        sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition);
    }

    if (hasAudioLoudness) {
        PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness));
        auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer);
        float audioLoudness;
        unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX);

        if (isNaN(audioLoudness)) {
            if (shouldLogError(now)) {
                qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID();
            }
            return buffer.size();
        }
        _headData->_audioLoudness = audioLoudness;
        sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness);
    }

    if (hasSensorToWorldMatrix) {
        PACKET_READ_CHECK(SensorToWorldMatrix, sizeof(AvatarDataPacket::SensorToWorldMatrix));
        auto data = reinterpret_cast<const AvatarDataPacket::SensorToWorldMatrix*>(sourceBuffer);
        glm::quat sensorToWorldQuat;
        unpackOrientationQuatFromSixBytes(data->sensorToWorldQuat, sensorToWorldQuat);
        float sensorToWorldScale;
        unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX);
        glm::vec3 sensorToWorldTrans(data->sensorToWorldTrans[0], data->sensorToWorldTrans[1], data->sensorToWorldTrans[2]);
        glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans);
        _sensorToWorldMatrixCache.set(sensorToWorldMatrix);
        sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix);
    }

    if (hasAdditionalFlags) {
        PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags));
        auto data = reinterpret_cast<const AvatarDataPacket::AdditionalFlags*>(sourceBuffer);
        uint8_t bitItems = data->flags;

        // key state, stored as a semi-nibble in the bitItems
        _keyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT);

        // hand state, stored as a semi-nibble plus a bit in the bitItems
        // we store the hand state as well as other items in a shared bitset. The hand state is an octal, but is split
        // into two sections to maintain backward compatibility. The bits are ordered as such (0-7 left to right).
        // +---+-----+-----+--+
        // |x,x|H0,H1|x,x,x|H2|
        // +---+-----+-----+--+
        // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits
        _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT)
            + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0);

        _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED);
        _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED);

        sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); // advance past the flags byte
    }

    // FIXME -- make sure to handle the existence of a parent vs a change in the parent...
    //bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL);
    if (hasParentInfo) {
        PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo));
        auto parentInfo = reinterpret_cast<const AvatarDataPacket::ParentInfo*>(sourceBuffer);
        sourceBuffer += sizeof(AvatarDataPacket::ParentInfo);

        QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID);
        _parentID = QUuid::fromRfc4122(byteArray);
        _parentJointIndex = parentInfo->parentJointIndex;
    } else {
        _parentID = QUuid();
    }

    if (hasFaceTrackerInfo) {
        PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo));
        auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer);
        sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo);

        _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink;
        _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink;
        _headData->_averageLoudness = faceTrackerInfo->averageLoudness;
        _headData->_browAudioLift = faceTrackerInfo->browAudioLift;

        int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients;
        const int coefficientsSize = sizeof(float) * numCoefficients;
        PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize);
        _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy!
        memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize);
        sourceBuffer += coefficientsSize;
    }

    if (hasJointData) {
        PACKET_READ_CHECK(NumJoints, sizeof(uint8_t));
        int numJoints = *sourceBuffer++;

        const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE);
        PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity);

        int numValidJointRotations = 0;
        QVector<bool> validRotations;
        validRotations.resize(numJoints);
        { // rotation validity bits
            unsigned char validity = 0;
            int validityBit = 0;
            for (int i = 0; i < numJoints; i++) {
                if (validityBit == 0) {
                    validity = *sourceBuffer++;
                }
                bool valid = (bool)(validity & (1 << validityBit));
                if (valid) {
                    ++numValidJointRotations;
                }
                validRotations[i] = valid;
                validityBit = (validityBit + 1) % BITS_IN_BYTE;
            }
        }

        // each joint rotation is stored in 6 bytes.
        QWriteLocker writeLock(&_jointDataLock);
        _jointData.resize(numJoints);

        const int COMPRESSED_QUATERNION_SIZE = 6;
        PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE);
        for (int i = 0; i < numJoints; i++) {
            JointData& data = _jointData[i];
            if (validRotations[i]) {
                sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation);
                _hasNewJointRotations = true;
                data.rotationSet = true;
            }
        }

        PACKET_READ_CHECK(JointTranslationValidityBits, bytesOfValidity);

        // get translation validity bits -- these indicate which translations were packed
        int numValidJointTranslations = 0;
        QVector<bool> validTranslations;
        validTranslations.resize(numJoints);
        { // translation validity bits
            unsigned char validity = 0;
            int validityBit = 0;
            for (int i = 0; i < numJoints; i++) {
                if (validityBit == 0) {
                    validity = *sourceBuffer++;
                }
                bool valid = (bool)(validity & (1 << validityBit));
                if (valid) {
                    ++numValidJointTranslations;
                }
                validTranslations[i] = valid;
                validityBit = (validityBit + 1) % BITS_IN_BYTE;
            }
        } // 1 + bytesOfValidity bytes

        // each joint translation component is stored in 6 bytes.
        const int COMPRESSED_TRANSLATION_SIZE = 6;
        PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE);

        for (int i = 0; i < numJoints; i++) {
            JointData& data = _jointData[i];
            if (validTranslations[i]) {
                sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX);
                _hasNewJointTranslations = true;
                data.translationSet = true;
            }
        }

#ifdef WANT_DEBUG
        if (numValidJointRotations > 15) {
            qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations
                << "translations:" << numValidJointTranslations
                << "size:" << (int)(sourceBuffer - startPosition);
        }
#endif
        // faux joints
        sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache);
        sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache);
    }

    int numBytesRead = sourceBuffer - startPosition;
    _averageBytesReceived.updateAverage(numBytesRead);
    return numBytesRead;
}
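Every section read above is guarded by PACKET_READ_CHECK, whose definition is not part of this diff. A plausible shape, consistent with how the parser bails out by consuming the whole packet on malformed input (assumed sketch only; the real macro may differ in wording and detail):

#define PACKET_READ_CHECK(ITEM_NAME, SIZE_TO_READ)                             \
    if ((endPosition - sourceBuffer) < (ptrdiff_t)(SIZE_TO_READ)) {            \
        if (shouldLogError(now)) {                                             \
            qCWarning(avatars) << "AvatarData packet too small, attempting to" \
                << "read" << #ITEM_NAME << ", uuid " << getSessionUUID();      \
        }                                                                      \
        return buffer.size();                                                  \
    }

Returning buffer.size() rather than the bytes actually consumed marks the packet as fully processed, so a truncated packet is dropped instead of desynchronizing subsequent reads.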

int AvatarData::getAverageBytesReceivedPerSecond() const {
    return lrint(_averageBytesReceived.getAverageSampleValuePerSecond());
}

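Both directions lean on packOrientationQuatToSixBytes / unpackOrientationQuatFromSixBytes for rotations. Six bytes suffice because a unit quaternion has only three independent components. One plausible "smallest three" encoding, as an illustrative sketch (not necessarily the exact layout the shared utilities use):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <glm/gtc/quaternion.hpp>

// Drop the largest-magnitude component (recoverable since |q| == 1), flip the
// sign so the dropped component is positive, quantize the remaining three to
// 15 bits each, and stash the 2-bit index of the dropped component in the two
// spare top bits.
static int packQuatSixBytes(uint8_t* dest, const glm::quat& q) {
    const float v[4] = { q.x, q.y, q.z, q.w };
    int largest = 0;
    for (int i = 1; i < 4; i++) {
        if (fabsf(v[i]) > fabsf(v[largest])) { largest = i; }
    }
    float sign = (v[largest] < 0.0f) ? -1.0f : 1.0f;
    uint16_t packed[3];
    int j = 0;
    for (int i = 0; i < 4; i++) {
        if (i == largest) { continue; }
        float c = sign * v[i] * (float)M_SQRT2;                  // now within [-1, 1]
        packed[j++] = (uint16_t)((c * 0.5f + 0.5f) * 32767.0f);  // 15-bit value
    }
    packed[0] |= (uint16_t)((largest & 0x2) << 14);  // high bit of the dropped index
    packed[1] |= (uint16_t)((largest & 0x1) << 15);  // low bit of the dropped index
    memcpy(dest, packed, 6);
    return 6;
}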
@@ -84,20 +84,169 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS =
const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND;



// Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of
// referential data in this bit set. The hand state is an octal, but is split into two sections to maintain
// backward compatibility. The bits are ordered as such (0-7 left to right).
// +-----+-----+-+-+-+--+
// |K0,K1|H0,H1|F|E|R|H2|
// +-----+-----+-+-+-+--+
// Key state - K0,K1 is found in the 1st and 2nd bits
// Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits
// Faceshift - F is found in the 5th bit
// Eye tracker - E is found in the 6th bit
// Referential Data - R is found in the 7th bit
const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits
const int IS_FACESHIFT_CONNECTED = 4; // 5th bit
const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING)
const int HAS_REFERENTIAL = 6; // 7th bit
const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit

const char HAND_STATE_NULL = 0;
const char LEFT_HAND_POINTING_FLAG = 1;
const char RIGHT_HAND_POINTING_FLAG = 2;
const char IS_FINGER_POINTING_FLAG = 4;

// AvatarData state flags - we store the details about the packet encoding in the first byte,
// before the "header" structure
const char AVATARDATA_FLAGS_MINIMUM = 0;
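The setSemiNibbleAt / getSemiNibbleAt / setAtBit / oneAtBit helpers used with these constants treat bit 0 as the leftmost (most significant) bit of the byte, matching the "0-7 left to right" diagram above. A sketch of that convention (illustrative; the shared helpers may differ in signature):

#include <cstdint>

static void setSemiNibbleAt(uint8_t& byte, int bitIndex, int twoBitValue) {
    byte |= (uint8_t)((twoBitValue & 0x3) << (6 - bitIndex)); // occupies bitIndex and bitIndex + 1
}

static int getSemiNibbleAt(uint8_t byte, int bitIndex) {
    return (byte >> (6 - bitIndex)) & 0x3;
}

static void setAtBit(uint8_t& byte, int bitIndex) {
    byte |= (uint8_t)(1 << (7 - bitIndex));
}

static bool oneAtBit(uint8_t byte, int bitIndex) {
    return (byte & (1 << (7 - bitIndex))) != 0;
}

Under this convention HAND_STATE_START_BIT = 2 lands H0,H1 in the 3rd and 4th bits, and HAND_STATE_FINGER_POINTING_BIT = 7 puts H2 in the 8th, exactly as the diagram describes.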

using smallFloat = uint16_t; // a compressed float with less precision, user-defined radix

namespace AvatarDataPacket {
    // Packet State Flags - we store the details about the existence of the other records
    // in this packet in this bitset (e.g. AvatarGlobalPosition, SensorToWorldMatrix, JointData, ...)
    using HasFlags = uint16_t;
    const HasFlags PACKET_HAS_AVATAR_GLOBAL_POSITION = 1U << 0;
    const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 1; // FIXME - can this be in the PARENT_INFO??
    const HasFlags PACKET_HAS_AVATAR_DIMENSIONS = 1U << 2;
    const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 3;
    const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 4;
    const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 5;
    const HasFlags PACKET_HAS_AUDIO_LOUDNESS = 1U << 6;
    const HasFlags PACKET_HAS_SENSOR_TO_WORLD_MATRIX = 1U << 7;
    const HasFlags PACKET_HAS_ADDITIONAL_FLAGS = 1U << 8;
    const HasFlags PACKET_HAS_PARENT_INFO = 1U << 9;
    const HasFlags PACKET_HAS_FACE_TRACKER_INFO = 1U << 10;
    const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11;

    // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure.

    PACKED_BEGIN struct Header {
        uint8_t packetStateFlags; // state flags, currently used to indicate if the packet is a minimal or fuller packet
        HasFlags packetHasFlags;  // state flags, indicating which additional records are included in the packet
                                  // bit 0 - has AvatarGlobalPosition
                                  // bit 1 - has AvatarLocalPosition
                                  // bit 2 - has AvatarDimensions
                                  // bit 3 - has AvatarOrientation
                                  // bit 4 - has AvatarScale
                                  // bit 5 - has LookAtPosition
                                  // bit 6 - has AudioLoudness
                                  // bit 7 - has SensorToWorldMatrix
                                  // bit 8 - has AdditionalFlags
                                  // bit 9 - has ParentInfo
                                  // bit 10 - has FaceTrackerInfo
                                  // bit 11 - has JointData
    } PACKED_END;
    const size_t HEADER_SIZE = 1;
    const size_t HEADER_SIZE = 2;

    PACKED_BEGIN struct MinimalAvatarInfo {
    PACKED_BEGIN struct AvatarGlobalPosition {
        float globalPosition[3]; // avatar's position
    } PACKED_END;
    const size_t MINIMAL_AVATAR_INFO_SIZE = 12;
    const size_t AVATAR_GLOBAL_POSITION_SIZE = 12;

    PACKED_BEGIN struct AvatarLocalPosition {
        float localPosition[3]; // this appears to be the avatar local position??
                                // this is a reduced precision radix
                                // FIXME - could this be changed into compressed floats?
    } PACKED_END;
    const size_t AVATAR_LOCAL_POSITION_SIZE = 12;

    PACKED_BEGIN struct AvatarDimensions {
        float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the
                                   // position. Assumed to be centered around the world position
                                   // FIXME - could this be changed into compressed floats?
    } PACKED_END;
    const size_t AVATAR_DIMENSIONS_SIZE = 12;


    PACKED_BEGIN struct AvatarOrientation {
        smallFloat localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the
                                        // thing it's attached to, or world relative if not attached
    } PACKED_END;
    const size_t AVATAR_ORIENTATION_SIZE = 6;

    PACKED_BEGIN struct AvatarScale {
        smallFloat scale; // avatar's scale, (compressed) 'ratio' encoding uses sign bit as flag.
    } PACKED_END;
    const size_t AVATAR_SCALE_SIZE = 2;

    PACKED_BEGIN struct LookAtPosition {
        float lookAtPosition[3]; // world space position that eyes are focusing on.
                                 // FIXME - unless the person has an eye tracker, this is simulated...
                                 //    a) maybe we can just have the client calculate this
                                 //    b) at distance this will be hard to discern and can likely be
                                 //       decimated or dropped completely
                                 //
                                 // POTENTIAL SAVINGS - 12 bytes
    } PACKED_END;
    const size_t LOOK_AT_POSITION_SIZE = 12;

    PACKED_BEGIN struct AudioLoudness {
        smallFloat audioLoudness; // current loudness of microphone, (compressed)
    } PACKED_END;
    const size_t AUDIO_LOUDNESS_SIZE = 2;

    PACKED_BEGIN struct SensorToWorldMatrix {
        // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments"
        // we could save these bytes if no attachments are active.
        //
        // POTENTIAL SAVINGS - 20 bytes

        uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix
        uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix
        float sensorToWorldTrans[3]; // fourth column of sensor to world matrix
                                     // FIXME - sensorToWorldTrans might be able to be better compressed if it was
                                     // relative to the avatar position.
    } PACKED_END;
    const size_t SENSOR_TO_WORLD_SIZE = 20;

    PACKED_BEGIN struct AdditionalFlags {
        uint8_t flags; // additional flags: hand state, key state, eye tracking
    } PACKED_END;
    const size_t ADDITIONAL_FLAGS_SIZE = 1;

    // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags
    PACKED_BEGIN struct ParentInfo {
        uint8_t parentUUID[16]; // rfc 4122 encoded
        uint16_t parentJointIndex;
    } PACKED_END;
    const size_t PARENT_INFO_SIZE = 18;

    // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags
    PACKED_BEGIN struct FaceTrackerInfo {
        float leftEyeBlink;
        float rightEyeBlink;
        float averageLoudness;
        float browAudioLift;
        uint8_t numBlendshapeCoefficients;
        // float blendshapeCoefficients[numBlendshapeCoefficients];
    } PACKED_END;
    const size_t FACE_TRACKER_INFO_SIZE = 17;

    // variable length structure follows
    /*
    struct JointData {
        uint8_t numJoints;
        uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows.
        SixByteQuat rotation[numValidRotations]; // encoded and compressed by packOrientationQuatToSixBytes()
        uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows.
        SixByteTrans translation[numValidTranslations]; // encoded and compressed by packFloatVec3ToSignedTwoByteFixed()
    };
    */
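Summing the section sizes above gives a useful sanity bound on the variable format: everything except blendshapes and joints is fixed-size. A compile-time sketch (the joint and blendshape counts are illustrative assumptions, not protocol limits):

#include <cstddef>

constexpr size_t maxAvatarDataBytes(size_t numJoints, size_t numBlendshapes) {
    size_t validityBytes = (numJoints + 7) / 8;          // ceil(numJoints / 8)
    return 2                                             // HasFlags
        + 12 + 12 + 12 + 6 + 2 + 12 + 2 + 20             // fixed-size sections above
        + 1 + 18                                         // AdditionalFlags + ParentInfo
        + 17 + numBlendshapes * sizeof(float)            // FaceTrackerInfo + coefficients
        + 1 + 2 * validityBytes                          // joint count + both validity masks
        + numJoints * (6 + 6)                            // 6-byte rotations and translations
        + 2 * (6 + 6);                                   // two faux hand joints
}

static_assert(maxAvatarDataBytes(128, 52) < 4096, "fits comfortably in one UDP packet");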

    // OLD FORMAT....
    PACKED_BEGIN struct AvatarInfo {
        // FIXME - this has 8 unique items, we could use a simple header byte to indicate whether or not the fields
        // exist in the packet and have changed since last being sent.

@@ -156,66 +305,8 @@ namespace AvatarDataPacket {
        uint8_t flags;
    } PACKED_END;
    const size_t AVATAR_INFO_SIZE = 79;

    // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags
    PACKED_BEGIN struct ParentInfo {
        uint8_t parentUUID[16]; // rfc 4122 encoded
        uint16_t parentJointIndex;
    } PACKED_END;
    const size_t PARENT_INFO_SIZE = 18;

    // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags
    PACKED_BEGIN struct FaceTrackerInfo {
        float leftEyeBlink;
        float rightEyeBlink;
        float averageLoudness;
        float browAudioLift;
        uint8_t numBlendshapeCoefficients;
        // float blendshapeCoefficients[numBlendshapeCoefficients];
    } PACKED_END;
    const size_t FACE_TRACKER_INFO_SIZE = 17;

    // variable length structure follows
    /*
    struct JointData {
        uint8_t numJoints;
        uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows.
        SixByteQuat rotation[numValidRotations]; // encoded and compressed by packOrientationQuatToSixBytes()
        uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows.
        SixByteTrans translation[numValidTranslations]; // encoded and compressed by packFloatVec3ToSignedTwoByteFixed()
    };
    */
}


// Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of
// referential data in this bit set. The hand state is an octal, but is split into two sections to maintain
// backward compatibility. The bits are ordered as such (0-7 left to right).
// +-----+-----+-+-+-+--+
// |K0,K1|H0,H1|F|E|R|H2|
// +-----+-----+-+-+-+--+
// Key state - K0,K1 is found in the 1st and 2nd bits
// Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits
// Faceshift - F is found in the 5th bit
// Eye tracker - E is found in the 6th bit
// Referential Data - R is found in the 7th bit
const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits
const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits
const int IS_FACESHIFT_CONNECTED = 4; // 5th bit
const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING)
const int HAS_REFERENTIAL = 6; // 7th bit
const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit

const char HAND_STATE_NULL = 0;
const char LEFT_HAND_POINTING_FLAG = 1;
const char RIGHT_HAND_POINTING_FLAG = 2;
const char IS_FINGER_POINTING_FLAG = 4;

// AvatarData state flags - we store the details about the packet encoding in the first byte,
// before the "header" structure
const char AVATARDATA_FLAGS_MINIMUM = 0;


static const float MAX_AVATAR_SCALE = 1000.0f;
static const float MIN_AVATAR_SCALE = .005f;

@@ -512,6 +603,29 @@ public slots:
    float getTargetScale() { return _targetScale; }

protected:
    void lazyInitHeadData();

    bool avatarLocalPositionChanged();
    bool avatarDimensionsChanged();
    bool avatarOrientationChanged();
    bool avatarScaleChanged();
    bool lookAtPositionChanged();
    bool audioLoudnessChanged();
    bool sensorToWorldMatrixChanged();
    bool additionalFlagsChanged();

    bool hasParent() { return !getParentID().isNull(); }
    bool parentInfoChanged();

    bool hasFaceTracker() { return _headData ? _headData->_isFaceTrackerConnected : false; }
    bool faceTrackerInfoChanged();

    QByteArray toByteArray_OLD(AvatarDataDetail dataDetail);
    QByteArray toByteArray_NEW(AvatarDataDetail dataDetail);

    int parseDataFromBuffer_OLD(const QByteArray& buffer);
    int parseDataFromBuffer_NEW(const QByteArray& buffer);

    glm::vec3 _handPosition;
    virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; }
    virtual void maybeUpdateSessionDisplayNameFromTransport(const QString& sessionDisplayName) { } // No-op in AvatarMixer

@@ -571,7 +685,21 @@ protected:
    // _globalPosition is sent along with localPosition + parent because the avatar-mixer doesn't know
    // where Entities are located. This is currently only used by the mixer to decide how often to send
    // updates about one avatar to another.
    glm::vec3 _globalPosition;
    glm::vec3 _globalPosition { 0, 0, 0 };

    glm::vec3 _lastSentGlobalPosition { 0, 0, 0 };
    glm::vec3 _lastSentLocalPosition { 0, 0, 0 };
    glm::vec3 _lastSentAvatarDimensions { 0, 0, 0 };
    glm::quat _lastSentLocalOrientation;
    float _lastSentScale { 0 };
    glm::vec3 _lastSentLookAt { 0, 0, 0 };
    float _lastSentAudioLoudness { 0 };
    glm::mat4 _lastSentSensorToWorldMatrix;
    uint8_t _lastSentAdditionalFlags { 0 };
    QUuid _lastSentParentID;
    quint16 _lastSentParentJointIndex { -1 };


    glm::vec3 _globalBoundingBoxCorner;

    mutable ReadWriteLockable _avatarEntitiesLock;

@@ -53,7 +53,7 @@ PacketVersion versionForPacketType(PacketType packetType) {
        case PacketType::AvatarData:
        case PacketType::BulkAvatarData:
        case PacketType::KillAvatar:
            return static_cast<PacketVersion>(AvatarMixerPacketVersion::SessionDisplayName);
            return static_cast<PacketVersion>(AvatarMixerPacketVersion::VariableAvatarData);
        case PacketType::ICEServerHeartbeat:
            return 18; // ICE Server Heartbeat signing
        case PacketType::AssetGetInfo:

@@ -207,7 +207,8 @@ enum class AvatarMixerPacketVersion : PacketVersion {
    SensorToWorldMat,
    HandControllerJoints,
    HasKillAvatarReason,
    SessionDisplayName
    SessionDisplayName,
    VariableAvatarData
};

enum class DomainConnectRequestVersion : PacketVersion {
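Bumping the avatar packet types to VariableAvatarData lets peers negotiate per-packet-type versions, so the old and new wire formats can coexist during rollout. A sketch of how a receiver might gate on the negotiated version (hypothetical dispatch; the real routing lives in the avatar networking code paths, and the _OLD/_NEW parsers are protected members here):

int parseAvatarData(PacketVersion senderVersion, const QByteArray& buffer, AvatarData& avatar) {
    if (senderVersion < static_cast<PacketVersion>(AvatarMixerPacketVersion::VariableAvatarData)) {
        return avatar.parseDataFromBuffer_OLD(buffer);   // legacy fixed layout
    }
    return avatar.parseDataFromBuffer_NEW(buffer);       // new HasFlags-driven layout
}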