From 82dd9412ab1decdf34a156c620cc7249818777a9 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Wed, 21 Dec 2016 17:21:28 -0800 Subject: [PATCH 01/43] tweaks, using packed scalar for audio loudness --- assignment-client/src/avatars/AvatarMixer.cpp | 2 + libraries/avatars/src/AvatarData.cpp | 106 +++++++---------- libraries/avatars/src/AvatarData.h | 108 ++++++++++++++++++ 3 files changed, 151 insertions(+), 65 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index d8d0b10fea..f052bb3a53 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -398,6 +398,8 @@ void AvatarMixer::broadcastAvatarData() { } numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); + + //qDebug() << "about to write data for:" << otherNode->getUUID(); numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail)); avatarPacketList->endSegment(); diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index f8805d3fc4..6ab12f9b66 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -49,67 +49,9 @@ const glm::vec3 DEFAULT_LOCAL_AABOX_SCALE(1.0f); const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; -namespace AvatarDataPacket { - - // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure. - - PACKED_BEGIN struct Header { - uint8_t packetStateFlags; // state flags, currently used to indicate if the packet is a minimal or fuller packet - } PACKED_END; - const size_t HEADER_SIZE = 1; - - PACKED_BEGIN struct MinimalAvatarInfo { - float globalPosition[3]; // avatar's position - } PACKED_END; - const size_t MINIMAL_AVATAR_INFO_SIZE = 12; - - PACKED_BEGIN struct AvatarInfo { - float position[3]; // skeletal model's position - float globalPosition[3]; // avatar's position - float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box - uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to - uint16_t scale; // (compressed) 'ratio' encoding uses sign bit as flag. - float lookAtPosition[3]; // world space position that eyes are focusing on. 
- float audioLoudness; // current loundess of microphone - uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix - uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix - float sensorToWorldTrans[3]; // fourth column of sensor to world matrix - uint8_t flags; - } PACKED_END; - const size_t AVATAR_INFO_SIZE = 81; - - // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags - PACKED_BEGIN struct ParentInfo { - uint8_t parentUUID[16]; // rfc 4122 encoded - uint16_t parentJointIndex; - } PACKED_END; - const size_t PARENT_INFO_SIZE = 18; - - // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags - PACKED_BEGIN struct FaceTrackerInfo { - float leftEyeBlink; - float rightEyeBlink; - float averageLoudness; - float browAudioLift; - uint8_t numBlendshapeCoefficients; - // float blendshapeCoefficients[numBlendshapeCoefficients]; - } PACKED_END; - const size_t FACE_TRACKER_INFO_SIZE = 17; - - // variable length structure follows - /* - struct JointData { - uint8_t numJoints; - uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. - SixByteQuat rotation[numValidRotations]; // encodeded and compressed by packOrientationQuatToSixBytes() - uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. - SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() - }; - */ -} - static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; +static const int AUDIO_LOUDNESS_RADIX = 10; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -230,12 +172,14 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { destinationBuffer += sizeof(_globalPosition); } else { auto avatarInfo = reinterpret_cast<AvatarDataPacket::AvatarInfo*>(destinationBuffer); - avatarInfo->position[0] = getLocalPosition().x; - avatarInfo->position[1] = getLocalPosition().y; - avatarInfo->position[2] = getLocalPosition().z; avatarInfo->globalPosition[0] = _globalPosition.x; avatarInfo->globalPosition[1] = _globalPosition.y; avatarInfo->globalPosition[2] = _globalPosition.z; + + avatarInfo->position[0] = getLocalPosition().x; + avatarInfo->position[1] = getLocalPosition().y; + avatarInfo->position[2] = getLocalPosition().z; + avatarInfo->globalBoundingBoxCorner[0] = getPosition().x - _globalBoundingBoxCorner.x; avatarInfo->globalBoundingBoxCorner[1] = getPosition().y - _globalBoundingBoxCorner.y; avatarInfo->globalBoundingBoxCorner[2] = getPosition().z - _globalBoundingBoxCorner.z; @@ -248,7 +192,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { avatarInfo->lookAtPosition[0] = _headData->_lookAtPosition.x; avatarInfo->lookAtPosition[1] = _headData->_lookAtPosition.y; avatarInfo->lookAtPosition[2] = _headData->_lookAtPosition.z; - avatarInfo->audioLoudness = _headData->_audioLoudness; + + packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->audioLoudness, _headData->_audioLoudness, AUDIO_LOUDNESS_RADIX); glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix(); packOrientationQuatToSixBytes(avatarInfo->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); @@ -280,6 +225,33 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { } destinationBuffer += sizeof(AvatarDataPacket::AvatarInfo); + #if 0 // debugging + #define COMPARE_MEMBER_V3(L, R, M) { if (L.M[0] != R.M[0] || 
L.M[1] != R.M[1] || L.M[2] != R.M[2]) { qCDebug(avatars) << #M " changed - old:" << "{" << L.M[0] << "," << L.M[1] << ", " << L.M[2] << "}" << " new:" "{" << R.M[0] << "," << R.M[1] << ", " << R.M[2] << "}"; } } + #define COMPARE_MEMBER_F(L, R, M) { if (L.M != R.M) { qCDebug(avatars) << #M " changed - old:" << L.M << " new:" << R.M; } } + + qCDebug(avatars) << "--------------"; + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), position); + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), globalPosition); + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), globalBoundingBoxCorner); + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), localOrientation); + COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), scale); + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), lookAtPosition); + COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), audioLoudness); + + if (_lastSensorToWorldMatrix != sensorToWorldMatrix) { + qCDebug(avatars) << "sensorToWorldMatrix changed - old:" << _lastSensorToWorldMatrix << "new:" << sensorToWorldMatrix; + } + //COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), sensorToWorldQuat); + COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), sensorToWorldScale); + COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), sensorToWorldTrans); + COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), flags); + + memcpy(&_lastAvatarInfo, avatarInfo, sizeof(_lastAvatarInfo)); + _lastSensorToWorldMatrix = sensorToWorldMatrix; + + #endif + + if (!parentID.isNull()) { auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer); QByteArray referentialAsBytes = parentID.toRfc4122(); @@ -527,8 +499,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { auto avatarInfo = reinterpret_cast<const AvatarDataPacket::AvatarInfo*>(sourceBuffer); sourceBuffer += sizeof(AvatarDataPacket::AvatarInfo); - glm::vec3 position = glm::vec3(avatarInfo->position[0], avatarInfo->position[1], avatarInfo->position[2]); _globalPosition = glm::vec3(avatarInfo->globalPosition[0], avatarInfo->globalPosition[1], avatarInfo->globalPosition[2]); + glm::vec3 position = glm::vec3(avatarInfo->position[0], avatarInfo->position[1], avatarInfo->position[2]); _globalBoundingBoxCorner = glm::vec3(avatarInfo->globalBoundingBoxCorner[0], avatarInfo->globalBoundingBoxCorner[1], avatarInfo->globalBoundingBoxCorner[2]); if (isNaN(position)) { if (shouldLogError(now)) { @@ -576,7 +548,11 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } _headData->_lookAtPosition = lookAt; - float audioLoudness = avatarInfo->audioLoudness; + + float audioLoudness; + unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&avatarInfo->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); + + // FIXME - is this really needed? if (isNaN(audioLoudness)) { if (shouldLogError(now)) { qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID(); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index db06d52092..0a01cf9a9a 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -83,6 +83,111 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS = const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND; + +namespace AvatarDataPacket { + + // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure. 
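    // An aside on the two-byte "compressed" scalar fields used by these packed structs:
    // packFloatScalarToSignedTwoByteFixed() / unpackFloatScalarFromSignedTwoByteFixed()
    // are not shown in this patch, but a signed 16-bit fixed-point encoding with a
    // radix presumably amounts to something like the following sketch (helper names
    // here are illustrative, not the library's):
    //
    //     int16_t packFixed(float value, int radix)     { return (int16_t)(value * (float)(1 << radix)); }
    //     float   unpackFixed(int16_t fixed, int radix) { return (float)fixed / (float)(1 << radix); }
    //
    // The radix trades precision for range: with AUDIO_LOUDNESS_RADIX of 10 the largest
    // representable loudness is roughly 32767 / 1024 ~= 32, which is why the next commit
    // drops the radix to 2 (roughly 32767 / 4 ~= 8191) and clamps the input to
    // MAX_AUDIO_LOUDNESS before packing.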
+ + PACKED_BEGIN struct Header { + uint8_t packetStateFlags; // state flags, currently used to indicate if the packet is a minimal or fuller packet + } PACKED_END; + const size_t HEADER_SIZE = 1; + + PACKED_BEGIN struct MinimalAvatarInfo { + float globalPosition[3]; // avatar's position + } PACKED_END; + const size_t MINIMAL_AVATAR_INFO_SIZE = 12; + + PACKED_BEGIN struct AvatarInfo { + // FIXME - this has 8 unqiue items, we could use a simple header byte to indicate whether or not the fields + // exist in the packet and have changed since last being sent. + float globalPosition[3]; // avatar's position + // FIXME - possible savings: + // a) could be encoded as relative to last known position, most movements + // will be withing a smaller radix + // b) would still need an intermittent absolute value. + + float position[3]; // skeletal model's position + // FIXME - this used to account for a registration offset from the avatar's position + // to the position of the skeletal model/mesh. This relative offset doesn't change from + // frame to frame, instead only changes when the model changes, it could be moved to the + // identity packet and/or only included when it changes. + // if it's encoded relative to the globalPosition, it could be reduced to a smaller radix + // + // POTENTIAL SAVINGS - 12 bytes + + float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box + // FIXME - this would change less frequently if it was the dimensions of the bounding box + // instead of the corner. + // + // POTENTIAL SAVINGS - 12 bytes + + uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to + uint16_t scale; // (compressed) 'ratio' encoding uses sign bit as flag. + // FIXME - this doesn't change every frame + // + // POTENTIAL SAVINGS - 2 bytes + + float lookAtPosition[3]; // world space position that eyes are focusing on. + // FIXME - unless the person has an eye tracker, this is simulated... + // a) maybe we can just have the client calculate this + // b) at distance this will be hard to discern and can likely be + // descimated or dropped completely + // + // POTENTIAL SAVINGS - 12 bytes + + uint16_t audioLoudness; // current loundess of microphone + // FIXME - + // a) this could probably be decimated with a smaller radix <<< DONE + // b) this doesn't change every frame + // + // POTENTIAL SAVINGS - 4-2 bytes + + // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments" + // we could save these bytes if no attachments are active. + // + // POTENTIAL SAVINGS - 20 bytes + + uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix + uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix + float sensorToWorldTrans[3]; // fourth column of sensor to world matrix + // FIXME - sensorToWorldTrans might be able to be better compressed if it was + // relative to the avatar position. 
+ uint8_t flags; + } PACKED_END; + const size_t AVATAR_INFO_SIZE = 79; + + // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags + PACKED_BEGIN struct ParentInfo { + uint8_t parentUUID[16]; // rfc 4122 encoded + uint16_t parentJointIndex; + } PACKED_END; + const size_t PARENT_INFO_SIZE = 18; + + // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags + PACKED_BEGIN struct FaceTrackerInfo { + float leftEyeBlink; + float rightEyeBlink; + float averageLoudness; + float browAudioLift; + uint8_t numBlendshapeCoefficients; + // float blendshapeCoefficients[numBlendshapeCoefficients]; + } PACKED_END; + const size_t FACE_TRACKER_INFO_SIZE = 17; + + // variable length structure follows + /* + struct JointData { + uint8_t numJoints; + uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. + SixByteQuat rotation[numValidRotations]; // encodeded and compressed by packOrientationQuatToSixBytes() + uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. + SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() + }; + */ +} + + // Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of // referential data in this bit set. The hand state is an octal, but is split into two sections to maintain // backward compatibility. The bits are ordered as such (0-7 left to right). @@ -482,6 +587,9 @@ protected: int getFauxJointIndex(const QString& name) const; + AvatarDataPacket::AvatarInfo _lastAvatarInfo; + glm::mat4 _lastSensorToWorldMatrix; + private: friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar); static QUrl _defaultFullAvatarModelUrl; From 977cda3d2e7cb6de094cf6f077ad2deb477ab9af Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Wed, 21 Dec 2016 19:10:19 -0800 Subject: [PATCH 02/43] adjust audio loudness radix so we can support the entire range --- libraries/avatars/src/AvatarData.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 6ab12f9b66..8e1d708182 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -51,7 +51,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; -static const int AUDIO_LOUDNESS_RADIX = 10; +static const int AUDIO_LOUDNESS_RADIX = 2; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -193,7 +193,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { avatarInfo->lookAtPosition[1] = _headData->_lookAtPosition.y; avatarInfo->lookAtPosition[2] = _headData->_lookAtPosition.z; - packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->audioLoudness, _headData->_audioLoudness, AUDIO_LOUDNESS_RADIX); + packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->audioLoudness, + glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS), AUDIO_LOUDNESS_RADIX); glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix(); packOrientationQuatToSixBytes(avatarInfo->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); From 02a6060b5e193b1b34e44a32cc521ff17bf1a041 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Thu, 22 Dec 2016 16:16:44 -0800 
Subject: [PATCH 03/43] hacking on new format --- libraries/avatars/src/AvatarData.cpp | 712 +++++++++++++++++- libraries/avatars/src/AvatarData.h | 254 +++++-- .../networking/src/udt/PacketHeaders.cpp | 2 +- libraries/networking/src/udt/PacketHeaders.h | 3 +- 4 files changed, 903 insertions(+), 68 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 8e1d708182..9e5dc4ab96 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -52,6 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const int AUDIO_LOUDNESS_RADIX = 2; +static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -77,10 +78,21 @@ AvatarData::AvatarData() : setBodyRoll(0.0f); ASSERT(sizeof(AvatarDataPacket::Header) == AvatarDataPacket::HEADER_SIZE); - ASSERT(sizeof(AvatarDataPacket::MinimalAvatarInfo) == AvatarDataPacket::MINIMAL_AVATAR_INFO_SIZE); - ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarGlobalPosition) == AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarLocalPosition) == AvatarDataPacket::AVATAR_LOCAL_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarDimensions) == AvatarDataPacket::AVATAR_DIMENSIONS_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarOrientation) == AvatarDataPacket::AVATAR_ORIENTATION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarScale) == AvatarDataPacket::AVATAR_SCALE_SIZE); + ASSERT(sizeof(AvatarDataPacket::LookAtPosition) == AvatarDataPacket::LOOK_AT_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AudioLoudness) == AvatarDataPacket::AUDIO_LOUDNESS_SIZE); + ASSERT(sizeof(AvatarDataPacket::SensorToWorldMatrix) == AvatarDataPacket::SENSOR_TO_WORLD_SIZE); + ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE); ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE); ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE); + + // Old format... 
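    // These runtime checks guard against the compiler padding the PACKED structs so that
    // they no longer match the byte-for-byte wire layout the *_SIZE constants describe.
    // Since the expected sizes are compile-time constants, the same guard could also be
    // written statically, e.g.:
    //
    //     static_assert(sizeof(AvatarDataPacket::AvatarGlobalPosition) ==
    //                   AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE, "AvatarGlobalPosition is padded");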
+ ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE); + } AvatarData::~AvatarData() { @@ -136,8 +148,11 @@ void AvatarData::setHandPosition(const glm::vec3& handPosition) { _handPosition = glm::inverse(getOrientation()) * (handPosition - getPosition()); } - QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { + return toByteArray_NEW(dataDetail); +} + +QByteArray AvatarData::toByteArray_OLD(AvatarDataDetail dataDetail) { bool cullSmallChanges = (dataDetail == CullSmallData); bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); @@ -408,6 +423,398 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { return avatarDataByteArray.left(destinationBuffer - startPosition); } +void AvatarData::lazyInitHeadData() { + // lazily allocate memory for HeadData in case we're not an Avatar instance + if (!_headData) { + _headData = new HeadData(this); + } + if (_forceFaceTrackerConnected) { + _headData->_isFaceTrackerConnected = true; + } +} + + +bool AvatarData::avatarLocalPositionChanged() { + return _lastSentLocalPosition != getLocalPosition(); +} + +bool AvatarData::avatarDimensionsChanged() { + auto avatarDimensions = getPosition() - _globalBoundingBoxCorner; + return _lastSentAvatarDimensions != avatarDimensions; +} + +bool AvatarData::avatarOrientationChanged() { + return _lastSentLocalOrientation != getLocalOrientation(); +} + +bool AvatarData::avatarScaleChanged() { + return _lastSentScale != getDomainLimitedScale(); +} + +bool AvatarData::lookAtPositionChanged() { + return _lastSentLookAt != _headData->_lookAtPosition; +} + +bool AvatarData::audioLoudnessChanged() { + return _lastSentAudioLoudness != glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS); +} + +bool AvatarData::sensorToWorldMatrixChanged() { + return _lastSentSensorToWorldMatrix != getSensorToWorldMatrix(); +} + +bool AvatarData::additionalFlagsChanged() { + return true; // FIXME! +} + +bool AvatarData::parentInfoChanged() { + return (_lastSentParentID != getParentID()) || (_lastSentParentJointIndex != _parentJointIndex); +} + +bool AvatarData::faceTrackerInfoChanged() { + return true; // FIXME! +} + +QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { + bool cullSmallChanges = (dataDetail == CullSmallData); + bool sendAll = (dataDetail == SendAllData); + bool sendMinimum = (dataDetail == MinimumData); + + // TODO: DRY this up to a shared method + // that can pack any type given the number of bytes + // and return the number of bytes to push the pointer + lazyInitHeadData(); + + QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); + unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); + unsigned char* startPosition = destinationBuffer; + unsigned char* packetStateFlagsAt = startPosition; + + // psuedo code.... + // - determine which sections will be included + // - create the packet has flags + // - include each section in order + + // FIXME - things to consider + // - how to dry up this code? + // + // - the sections below are basically little repeats of each other, where they + // cast the destination pointer to the section struct type, set the struct + // members in some specific way (not just assigning), then advance the buffer, + // and then remember the last value sent. 
This could be macro-ized and/or + // templatized or lambda-ized + // + // - also, we could determine the "hasXXX" flags in the little sections, + // and then set the actual flag values AFTER the rest are done... + // + // - this toByteArray() side-effects the AvatarData, is that safe? in particular + // is it possible we'll call toByteArray() and then NOT actually use the result? + + bool hasAvatarGlobalPosition = true; // always include global position + bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); + bool hasAvatarDimensions = sendAll || avatarDimensionsChanged(); + bool hasAvatarOrientation = sendAll || avatarOrientationChanged(); + bool hasAvatarScale = sendAll || avatarScaleChanged(); + bool hasLookAtPosition = sendAll || lookAtPositionChanged(); + bool hasAudioLoudness = sendAll || audioLoudnessChanged(); + bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChanged(); + bool hasAdditionalFlags = sendAll || additionalFlagsChanged(); + bool hasParentInfo = hasParent() && (sendAll || parentInfoChanged()); + bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChanged()); + bool hasJointData = !sendMinimum; + + // Leading flags, to indicate how much data is actually included in the packet... + AvatarDataPacket::HasFlags packetStateFlags = + (hasAvatarGlobalPosition && AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION) + | (hasAvatarLocalPosition && AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION) + | (hasAvatarDimensions && AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS) + | (hasAvatarOrientation && AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION) + | (hasAvatarScale && AvatarDataPacket::PACKET_HAS_AVATAR_SCALE) + | (hasLookAtPosition && AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION) + | (hasAudioLoudness && AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS) + | (hasSensorToWorldMatrix && AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX) + | (hasAdditionalFlags && AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS) + | (hasParentInfo && AvatarDataPacket::PACKET_HAS_PARENT_INFO) + | (hasFaceTrackerInfo && AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO) + | (hasJointData && AvatarDataPacket::PACKET_HAS_JOINT_DATA); + + memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); + destinationBuffer += sizeof(packetStateFlags); + + if (hasAvatarGlobalPosition) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarGlobalPosition*>(destinationBuffer); + data->globalPosition[0] = _globalPosition.x; + data->globalPosition[1] = _globalPosition.y; + data->globalPosition[2] = _globalPosition.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); + _lastSentGlobalPosition = _globalPosition; + } + + // FIXME - I was told by tony this was "skeletal model position"-- but it seems to be + // SpatiallyNestable::getLocalPosition() ... which AFAICT is almost always the same as + // the global position (unless presumably you're on a parent)... 
we might be able to + // include this in the parent info record + if (hasAvatarLocalPosition) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarLocalPosition*>(destinationBuffer); + auto localPosition = getLocalPosition(); + data->localPosition[0] = localPosition.x; + data->localPosition[1] = localPosition.y; + data->localPosition[2] = localPosition.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + _lastSentLocalPosition = localPosition; + } + + if (hasAvatarDimensions) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarDimensions*>(destinationBuffer); + auto avatarDimensions = getPosition() - _globalBoundingBoxCorner; + data->avatarDimensions[0] = avatarDimensions.x; + data->avatarDimensions[1] = avatarDimensions.y; + data->avatarDimensions[2] = avatarDimensions.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions); + _lastSentAvatarDimensions = avatarDimensions; + } + + if (hasAvatarOrientation) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarOrientation*>(destinationBuffer); + auto localOrientation = getLocalOrientation(); + glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(localOrientation)); + packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 0), bodyEulerAngles.y); + packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); + packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); + destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); + _lastSentLocalOrientation = localOrientation; + } + + if (hasAvatarScale) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarScale*>(destinationBuffer); + auto scale = getDomainLimitedScale(); + packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale); + destinationBuffer += sizeof(AvatarDataPacket::AvatarScale); + _lastSentScale = scale; + } + + if (hasLookAtPosition) { + auto data = reinterpret_cast<AvatarDataPacket::LookAtPosition*>(destinationBuffer); + auto lookAt = _headData->_lookAtPosition; + data->lookAtPosition[0] = lookAt.x; + data->lookAtPosition[1] = lookAt.y; + data->lookAtPosition[2] = lookAt.z; + destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); + _lastSentLookAt = lookAt; + } + + if (hasAudioLoudness) { + auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); + auto audioLoudness = glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS); + packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); + destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); + _lastSentAudioLoudness = audioLoudness; + } + + if (hasSensorToWorldMatrix) { + auto data = reinterpret_cast<AvatarDataPacket::SensorToWorldMatrix*>(destinationBuffer); + glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix(); + packOrientationQuatToSixBytes(data->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); + glm::vec3 scale = extractScale(sensorToWorldMatrix); + packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX); + data->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0]; + data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; + data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; + destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); + _lastSentSensorToWorldMatrix = sensorToWorldMatrix; + } + + QUuid parentID = getParentID(); + + if (hasAdditionalFlags) { + auto data = reinterpret_cast<AvatarDataPacket::AdditionalFlags*>(destinationBuffer); 
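        // The single AdditionalFlags byte reuses the bit layout documented in AvatarData.h:
        // key state in bits 0-1, hand state in bits 2-3 plus bit 7 (finger pointing),
        // face tracker connected in bit 4, eye tracker connected in bit 5, and
        // has-parent/referential in bit 6.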
+ + uint8_t flags { 0 }; + + setSemiNibbleAt(flags, KEY_STATE_START_BIT, _keyState); + + // hand state + bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG; + setSemiNibbleAt(flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG); + if (isFingerPointing) { + setAtBit(flags, HAND_STATE_FINGER_POINTING_BIT); + } + // faceshift state + if (_headData->_isFaceTrackerConnected) { + setAtBit(flags, IS_FACESHIFT_CONNECTED); + } + // eye tracker state + if (_headData->_isEyeTrackerConnected) { + setAtBit(flags, IS_EYE_TRACKER_CONNECTED); + } + // referential state + if (!parentID.isNull()) { + setAtBit(flags, HAS_REFERENTIAL); + } + data->flags = flags; + destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); + _lastSentAdditionalFlags = flags; + } + + if (hasParentInfo) { + auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer); + QByteArray referentialAsBytes = parentID.toRfc4122(); + memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); + parentInfo->parentJointIndex = _parentJointIndex; + destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); + _lastSentParentID = parentID; + _lastSentParentJointIndex = _parentJointIndex; + } + + // If it is connected, pack up the data + if (hasFaceTrackerInfo) { + auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer); + + faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink; + faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink; + faceTrackerInfo->averageLoudness = _headData->_averageLoudness; + faceTrackerInfo->browAudioLift = _headData->_browAudioLift; + faceTrackerInfo->numBlendshapeCoefficients = _headData->_blendshapeCoefficients.size(); + destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); + + // followed by a variable number of float coefficients + memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); + destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); + } + + // If it is connected, pack up the data + if (hasJointData) { + QReadLocker readLock(&_jointDataLock); + + // joint rotation data + *destinationBuffer++ = _jointData.size(); + unsigned char* validityPosition = destinationBuffer; + unsigned char validity = 0; + int validityBit = 0; + +#ifdef WANT_DEBUG + int rotationSentCount = 0; + unsigned char* beforeRotations = destinationBuffer; +#endif + + _lastSentJointData.resize(_jointData.size()); + + for (int i = 0; i < _jointData.size(); i++) { + const JointData& data = _jointData[i]; + if (sendAll || _lastSentJointData[i].rotation != data.rotation) { + if (sendAll || + !cullSmallChanges || + fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) { + if (data.rotationSet) { + validity |= (1 << validityBit); +#ifdef WANT_DEBUG + rotationSentCount++; +#endif + } + } + } + if (++validityBit == BITS_IN_BYTE) { + *destinationBuffer++ = validity; + validityBit = validity = 0; + } + } + if (validityBit != 0) { + *destinationBuffer++ = validity; + } + + validityBit = 0; + validity = *validityPosition++; + for (int i = 0; i < _jointData.size(); i++) { + const JointData& data = _jointData[i]; + if (validity & (1 << validityBit)) { + destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation); + } + if (++validityBit == BITS_IN_BYTE) { + validityBit = 0; + validity = *validityPosition++; + } + } + + + // joint translation data + 
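        // As with the rotations above, this section is written as ceil(numJoints / 8)
        // validity bytes followed by six bytes per changed translation; for example,
        // 90 joints with 25 changed translations costs 12 + 150 = 162 bytes.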
validityPosition = destinationBuffer; + validity = 0; + validityBit = 0; + +#ifdef WANT_DEBUG + int translationSentCount = 0; + unsigned char* beforeTranslations = destinationBuffer; +#endif + + float maxTranslationDimension = 0.0; + for (int i = 0; i < _jointData.size(); i++) { + const JointData& data = _jointData[i]; + if (sendAll || _lastSentJointData[i].translation != data.translation) { + if (sendAll || + !cullSmallChanges || + glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) { + if (data.translationSet) { + validity |= (1 << validityBit); +#ifdef WANT_DEBUG + translationSentCount++; +#endif + maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); + maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension); + maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension); + } + } + } + if (++validityBit == BITS_IN_BYTE) { + *destinationBuffer++ = validity; + validityBit = validity = 0; + } + } + + if (validityBit != 0) { + *destinationBuffer++ = validity; + } + + validityBit = 0; + validity = *validityPosition++; + for (int i = 0; i < _jointData.size(); i++) { + const JointData& data = _jointData[i]; + if (validity & (1 << validityBit)) { + destinationBuffer += + packFloatVec3ToSignedTwoByteFixed(destinationBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); + } + if (++validityBit == BITS_IN_BYTE) { + validityBit = 0; + validity = *validityPosition++; + } + } + + // faux joints + Transform controllerLeftHandTransform = Transform(getControllerLeftHandMatrix()); + destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerLeftHandTransform.getRotation()); + destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerLeftHandTransform.getTranslation(), + TRANSLATION_COMPRESSION_RADIX); + Transform controllerRightHandTransform = Transform(getControllerRightHandMatrix()); + destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerRightHandTransform.getRotation()); + destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(), + TRANSLATION_COMPRESSION_RADIX); + +#ifdef WANT_DEBUG + if (sendAll) { + qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll + << "rotations:" << rotationSentCount << "translations:" << translationSentCount + << "largest:" << maxTranslationDimension + << "size:" + << (beforeRotations - startPosition) << "+" + << (beforeTranslations - beforeRotations) << "+" + << (destinationBuffer - beforeTranslations) << "=" + << (destinationBuffer - startPosition); + } +#endif + } + + return avatarDataByteArray.left(destinationBuffer - startPosition); +} + void AvatarData::doneEncoding(bool cullSmallChanges) { // The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData. 
QReadLocker readLock(&_jointDataLock); @@ -473,6 +880,11 @@ const unsigned char* unpackFauxJoint(const unsigned char* sourceBuffer, ThreadSa // read data in packet starting at byte offset and return number of bytes parsed int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { + return parseDataFromBuffer_NEW(buffer); +} + +// read data in packet starting at byte offset and return number of bytes parsed +int AvatarData::parseDataFromBuffer_OLD(const QByteArray& buffer) { // lazily allocate memory for HeadData in case we're not an Avatar instance if (!_headData) { _headData = new HeadData(this); @@ -714,6 +1126,300 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return numBytesRead; } + +// read data in packet starting at byte offset and return number of bytes parsed +int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { + // lazily allocate memory for HeadData in case we're not an Avatar instance + lazyInitHeadData(); + + AvatarDataPacket::HasFlags packetStateFlags; + + const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data()); + const unsigned char* endPosition = startPosition + buffer.size(); + const unsigned char* sourceBuffer = startPosition; + + // read the packet flags + memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); + + #define HAS_FLAG(B,F) ((B & F) == F) + + bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION); + bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION); + bool hasAvatarDimensions = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS); + bool hasAvatarOrientation = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION); + bool hasAvatarScale = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_SCALE); + bool hasLookAtPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION); + bool hasAudioLoudness = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS); + bool hasSensorToWorldMatrix = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX); + bool hasAdditionalFlags = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS); + bool hasParentInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_PARENT_INFO); + bool hasFaceTrackerInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO); + bool hasJointData = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_JOINT_DATA); + + sourceBuffer += sizeof(AvatarDataPacket::HasFlags); + + quint64 now = usecTimestampNow(); + + if (hasAvatarGlobalPosition) { + PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer); + _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); + sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); + } + + if (hasAvatarLocalPosition) { + PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarLocalPosition*>(sourceBuffer); + glm::vec3 position = glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]); + if (isNaN(position)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " 
<< getSessionUUID(); + } + return buffer.size(); + } + setLocalPosition(position); + sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + } + + if (hasAvatarDimensions) { + PACKET_READ_CHECK(AvatarDimensions, sizeof(AvatarDataPacket::AvatarDimensions)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarDimensions*>(sourceBuffer); + + // FIXME - this is suspicious looking! + _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); + sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); + } + + if (hasAvatarOrientation) { + PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarOrientation*>(sourceBuffer); + float pitch, yaw, roll; + unpackFloatAngleFromTwoByte(data->localOrientation + 0, &yaw); + unpackFloatAngleFromTwoByte(data->localOrientation + 1, &pitch); + unpackFloatAngleFromTwoByte(data->localOrientation + 2, &roll); + if (isNaN(yaw) || isNaN(pitch) || isNaN(roll)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: localOriention is NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + + glm::quat currentOrientation = getLocalOrientation(); + glm::vec3 newEulerAngles(pitch, yaw, roll); + glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles)); + if (currentOrientation != newOrientation) { + _hasNewJointRotations = true; + setLocalOrientation(newOrientation); + } + sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); + } + + if (hasAvatarScale) { + PACKET_READ_CHECK(AvatarScale, sizeof(AvatarDataPacket::AvatarScale)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarScale*>(sourceBuffer); + float scale; + unpackFloatRatioFromTwoByte((uint8_t*)&data->scale, scale); + if (isNaN(scale)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: scale NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + setTargetScale(scale); + sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); + } + + if (hasLookAtPosition) { + PACKET_READ_CHECK(LookAtPosition, sizeof(AvatarDataPacket::LookAtPosition)); + auto data = reinterpret_cast<const AvatarDataPacket::LookAtPosition*>(sourceBuffer); + glm::vec3 lookAt = glm::vec3(data->lookAtPosition[0], data->lookAtPosition[1], data->lookAtPosition[2]); + if (isNaN(lookAt)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: lookAtPosition is NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + _headData->_lookAtPosition = lookAt; + sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); + } + + if (hasAudioLoudness) { + PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); + auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); + float audioLoudness; + unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); + + if (isNaN(audioLoudness)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + _headData->_audioLoudness = audioLoudness; + sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); + } + + if (hasSensorToWorldMatrix) { + PACKET_READ_CHECK(SensorToWorldMatrix, sizeof(AvatarDataPacket::SensorToWorldMatrix)); + auto data = reinterpret_cast<const AvatarDataPacket::SensorToWorldMatrix*>(sourceBuffer); + 
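        // The 20-byte SensorToWorldMatrix record (6-byte compressed quaternion, 2-byte
        // fixed-point uniform scale, three float translation components) is expanded
        // back into a full glm::mat4 below via createMatFromScaleQuatAndPos().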
glm::quat sensorToWorldQuat; + unpackOrientationQuatFromSixBytes(data->sensorToWorldQuat, sensorToWorldQuat); + float sensorToWorldScale; + unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX); + glm::vec3 sensorToWorldTrans(data->sensorToWorldTrans[0], data->sensorToWorldTrans[1], data->sensorToWorldTrans[2]); + glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); + _sensorToWorldMatrixCache.set(sensorToWorldMatrix); + sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); + } + + if (hasAdditionalFlags) { + PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags)); + auto data = reinterpret_cast<const AvatarDataPacket::AdditionalFlags*>(sourceBuffer); + uint8_t bitItems = data->flags; + + // key state, stored as a semi-nibble in the bitItems + _keyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); + + // hand state, stored as a semi-nibble plus a bit in the bitItems + // we store the hand state as well as other items in a shared bitset. The hand state is an octal, but is split + // into two sections to maintain backward compatibility. The bits are ordered as such (0-7 left to right). + // +---+-----+-----+--+ + // |x,x|H0,H1|x,x,x|H2| + // +---+-----+-----+--+ + // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits + _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0); + + _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); + _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); + + } + + // FIXME -- make sure to handle the existance of a parent vs a change in the parent... + //bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL); + if (hasParentInfo) { + PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo)); + auto parentInfo = reinterpret_cast<const AvatarDataPacket::ParentInfo*>(sourceBuffer); + sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); + + QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); + _parentID = QUuid::fromRfc4122(byteArray); + _parentJointIndex = parentInfo->parentJointIndex; + } else { + _parentID = QUuid(); + } + + if (hasFaceTrackerInfo) { + PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo)); + auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer); + sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); + + _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink; + _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink; + _headData->_averageLoudness = faceTrackerInfo->averageLoudness; + _headData->_browAudioLift = faceTrackerInfo->browAudioLift; + + int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients; + const int coefficientsSize = sizeof(float) * numCoefficients; + PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize); + _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
+ memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); + sourceBuffer += coefficientsSize; + } + + if (hasJointData) { + PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); + int numJoints = *sourceBuffer++; + + const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); + PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); + + int numValidJointRotations = 0; + QVector<bool> validRotations; + validRotations.resize(numJoints); + { // rotation validity bits + unsigned char validity = 0; + int validityBit = 0; + for (int i = 0; i < numJoints; i++) { + if (validityBit == 0) { + validity = *sourceBuffer++; + } + bool valid = (bool)(validity & (1 << validityBit)); + if (valid) { + ++numValidJointRotations; + } + validRotations[i] = valid; + validityBit = (validityBit + 1) % BITS_IN_BYTE; + } + } + + // each joint rotation is stored in 6 bytes. + QWriteLocker writeLock(&_jointDataLock); + _jointData.resize(numJoints); + + const int COMPRESSED_QUATERNION_SIZE = 6; + PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE); + for (int i = 0; i < numJoints; i++) { + JointData& data = _jointData[i]; + if (validRotations[i]) { + sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation); + _hasNewJointRotations = true; + data.rotationSet = true; + } + } + + PACKET_READ_CHECK(JointTranslationValidityBits, bytesOfValidity); + + // get translation validity bits -- these indicate which translations were packed + int numValidJointTranslations = 0; + QVector<bool> validTranslations; + validTranslations.resize(numJoints); + { // translation validity bits + unsigned char validity = 0; + int validityBit = 0; + for (int i = 0; i < numJoints; i++) { + if (validityBit == 0) { + validity = *sourceBuffer++; + } + bool valid = (bool)(validity & (1 << validityBit)); + if (valid) { + ++numValidJointTranslations; + } + validTranslations[i] = valid; + validityBit = (validityBit + 1) % BITS_IN_BYTE; + } + } // 1 + bytesOfValidity bytes + + // each joint translation component is stored in 6 bytes. 
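        // PACKET_READ_CHECK(section, bytes) is used throughout this parser but its
        // definition is not part of this patch; presumably it verifies that at least
        // `bytes` remain between sourceBuffer and endPosition and bails out of the
        // parse otherwise, along the lines of:
        //
        //     if ((endPosition - sourceBuffer) < (ptrdiff_t)(bytes)) {
        //         qCWarning(avatars) << "AvatarData packet too small reading " << #section;
        //         return buffer.size();
        //     }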
+ const int COMPRESSED_TRANSLATION_SIZE = 6; + PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE); + + for (int i = 0; i < numJoints; i++) { + JointData& data = _jointData[i]; + if (validTranslations[i]) { + sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); + _hasNewJointTranslations = true; + data.translationSet = true; + } + } + +#ifdef WANT_DEBUG + if (numValidJointRotations > 15) { + qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations + << "translations:" << numValidJointTranslations + << "size:" << (int)(sourceBuffer - startPosition); + } +#endif + // faux joints + sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); + sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); + } + + int numBytesRead = sourceBuffer - startPosition; + _averageBytesReceived.updateAverage(numBytesRead); + return numBytesRead; +} + int AvatarData::getAverageBytesReceivedPerSecond() const { return lrint(_averageBytesReceived.getAverageSampleValuePerSecond()); } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 0a01cf9a9a..de34df4331 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -84,20 +84,169 @@ const quint32 AVATAR_MOTION_SCRIPTABLE_BITS = const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND; + +// Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of +// referential data in this bit set. The hand state is an octal, but is split into two sections to maintain +// backward compatibility. The bits are ordered as such (0-7 left to right). +// +-----+-----+-+-+-+--+ +// |K0,K1|H0,H1|F|E|R|H2| +// +-----+-----+-+-+-+--+ +// Key state - K0,K1 is found in the 1st and 2nd bits +// Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits +// Faceshift - F is found in the 5th bit +// Eye tracker - E is found in the 6th bit +// Referential Data - R is found in the 7th bit +const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits +const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits +const int IS_FACESHIFT_CONNECTED = 4; // 5th bit +const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING) +const int HAS_REFERENTIAL = 6; // 7th bit +const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit + + +const char HAND_STATE_NULL = 0; +const char LEFT_HAND_POINTING_FLAG = 1; +const char RIGHT_HAND_POINTING_FLAG = 2; +const char IS_FINGER_POINTING_FLAG = 4; + +// AvatarData state flags - we store the details about the packet encoding in the first byte, +// before the "header" structure +const char AVATARDATA_FLAGS_MINIMUM = 0; + +using smallFloat = uint16_t; // a compressed float with less precision, user defined radix + namespace AvatarDataPacket { + // Packet State Flags - we store the details about the existence of other records in this bitset: + // AvatarGlobalPosition, Avatar Faceshift, eye tracking, and existence of + using HasFlags = uint16_t; + const HasFlags PACKET_HAS_AVATAR_GLOBAL_POSITION = 1U << 0; + const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 1; // FIXME - can this be in the PARENT_INFO?? 
+ const HasFlags PACKET_HAS_AVATAR_DIMENSIONS = 1U << 2; + const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 3; + const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 4; + const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 5; + const HasFlags PACKET_HAS_AUDIO_LOUDNESS = 1U << 6; + const HasFlags PACKET_HAS_SENSOR_TO_WORLD_MATRIX = 1U << 7; + const HasFlags PACKET_HAS_ADDITIONAL_FLAGS = 1U << 8; + const HasFlags PACKET_HAS_PARENT_INFO = 1U << 9; + const HasFlags PACKET_HAS_FACE_TRACKER_INFO = 1U << 10; + const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11; // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure. PACKED_BEGIN struct Header { - uint8_t packetStateFlags; // state flags, currently used to indicate if the packet is a minimal or fuller packet + HasFlags packetHasFlags; // state flags, indicated which additional records are included in the packet + // bit 0 - has AvatarGlobalPosition + // bit 1 - has AvatarLocalPosition + // bit 2 - has AvatarDimensions + // bit 3 - has AvatarOrientation + // bit 4 - has AvatarScale + // bit 5 - has LookAtPosition + // bit 6 - has AudioLoudness + // bit 7 - has SensorToWorldMatrix + // bit 8 - has AdditionalFlags + // bit 9 - has ParentInfo + // bit 10 - has FaceTrackerInfo + // bit 11 - has JointData } PACKED_END; - const size_t HEADER_SIZE = 1; + const size_t HEADER_SIZE = 2; - PACKED_BEGIN struct MinimalAvatarInfo { + PACKED_BEGIN struct AvatarGlobalPosition { float globalPosition[3]; // avatar's position } PACKED_END; - const size_t MINIMAL_AVATAR_INFO_SIZE = 12; + const size_t AVATAR_GLOBAL_POSITION_SIZE = 12; + PACKED_BEGIN struct AvatarLocalPosition { + float localPosition[3]; // this appears to be the avatar local position?? + // this is a reduced precision radix + // FIXME - could this be changed into compressed floats? + } PACKED_END; + const size_t AVATAR_LOCAL_POSITION_SIZE = 12; + + PACKED_BEGIN struct AvatarDimensions { + float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the + // position. Assumed to be centered around the world position + // FIXME - could this be changed into compressed floats? + } PACKED_END; + const size_t AVATAR_DIMENSIONS_SIZE = 12; + + + PACKED_BEGIN struct AvatarOrientation { + smallFloat localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the + // thing it's attached to, or world relative if not attached + } PACKED_END; + const size_t AVATAR_ORIENTATION_SIZE = 6; + + PACKED_BEGIN struct AvatarScale { + smallFloat scale; // avatar's scale, (compressed) 'ratio' encoding uses sign bit as flag. + } PACKED_END; + const size_t AVATAR_SCALE_SIZE = 2; + + PACKED_BEGIN struct LookAtPosition { + float lookAtPosition[3]; // world space position that eyes are focusing on. + // FIXME - unless the person has an eye tracker, this is simulated... + // a) maybe we can just have the client calculate this + // b) at distance this will be hard to discern and can likely be + // descimated or dropped completely + // + // POTENTIAL SAVINGS - 12 bytes + } PACKED_END; + const size_t LOOK_AT_POSITION_SIZE = 12; + + PACKED_BEGIN struct AudioLoudness { + smallFloat audioLoudness; // current loudness of microphone, (compressed) + } PACKED_END; + const size_t AUDIO_LOUDNESS_SIZE = 2; + + PACKED_BEGIN struct SensorToWorldMatrix { + // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments" + // we could save these bytes if no attachments are active. 
+ // + // POTENTIAL SAVINGS - 20 bytes + + uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix + uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix + float sensorToWorldTrans[3]; // fourth column of sensor to world matrix + // FIXME - sensorToWorldTrans might be able to be better compressed if it was + // relative to the avatar position. + } PACKED_END; + const size_t SENSOR_TO_WORLD_SIZE = 20; + + PACKED_BEGIN struct AdditionalFlags { + uint8_t flags; // additional flags: hand state, key state, eye tracking + } PACKED_END; + const size_t ADDITIONAL_FLAGS_SIZE = 1; + + // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags + PACKED_BEGIN struct ParentInfo { + uint8_t parentUUID[16]; // rfc 4122 encoded + uint16_t parentJointIndex; + } PACKED_END; + const size_t PARENT_INFO_SIZE = 18; + + // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags + PACKED_BEGIN struct FaceTrackerInfo { + float leftEyeBlink; + float rightEyeBlink; + float averageLoudness; + float browAudioLift; + uint8_t numBlendshapeCoefficients; + // float blendshapeCoefficients[numBlendshapeCoefficients]; + } PACKED_END; + const size_t FACE_TRACKER_INFO_SIZE = 17; + + // variable length structure follows + /* + struct JointData { + uint8_t numJoints; + uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. + SixByteQuat rotation[numValidRotations]; // encodeded and compressed by packOrientationQuatToSixBytes() + uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. + SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() + }; + */ + + // OLD FORMAT.... PACKED_BEGIN struct AvatarInfo { // FIXME - this has 8 unqiue items, we could use a simple header byte to indicate whether or not the fields // exist in the packet and have changed since last being sent. @@ -156,66 +305,8 @@ namespace AvatarDataPacket { uint8_t flags; } PACKED_END; const size_t AVATAR_INFO_SIZE = 79; - - // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags - PACKED_BEGIN struct ParentInfo { - uint8_t parentUUID[16]; // rfc 4122 encoded - uint16_t parentJointIndex; - } PACKED_END; - const size_t PARENT_INFO_SIZE = 18; - - // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags - PACKED_BEGIN struct FaceTrackerInfo { - float leftEyeBlink; - float rightEyeBlink; - float averageLoudness; - float browAudioLift; - uint8_t numBlendshapeCoefficients; - // float blendshapeCoefficients[numBlendshapeCoefficients]; - } PACKED_END; - const size_t FACE_TRACKER_INFO_SIZE = 17; - - // variable length structure follows - /* - struct JointData { - uint8_t numJoints; - uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. - SixByteQuat rotation[numValidRotations]; // encodeded and compressed by packOrientationQuatToSixBytes() - uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. - SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() - }; - */ } - -// Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of -// referential data in this bit set. 
The hand state is an octal, but is split into two sections to maintain -// backward compatibility. The bits are ordered as such (0-7 left to right). -// +-----+-----+-+-+-+--+ -// |K0,K1|H0,H1|F|E|R|H2| -// +-----+-----+-+-+-+--+ -// Key state - K0,K1 is found in the 1st and 2nd bits -// Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits -// Faceshift - F is found in the 5th bit -// Eye tracker - E is found in the 6th bit -// Referential Data - R is found in the 7th bit -const int KEY_STATE_START_BIT = 0; // 1st and 2nd bits -const int HAND_STATE_START_BIT = 2; // 3rd and 4th bits -const int IS_FACESHIFT_CONNECTED = 4; // 5th bit -const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING) -const int HAS_REFERENTIAL = 6; // 7th bit -const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit - -const char HAND_STATE_NULL = 0; -const char LEFT_HAND_POINTING_FLAG = 1; -const char RIGHT_HAND_POINTING_FLAG = 2; -const char IS_FINGER_POINTING_FLAG = 4; - -// AvatarData state flags - we store the details about the packet encoding in the first byte, -// before the "header" structure -const char AVATARDATA_FLAGS_MINIMUM = 0; - - static const float MAX_AVATAR_SCALE = 1000.0f; static const float MIN_AVATAR_SCALE = .005f; @@ -512,6 +603,29 @@ public slots: float getTargetScale() { return _targetScale; } protected: + void lazyInitHeadData(); + + bool avatarLocalPositionChanged(); + bool avatarDimensionsChanged(); + bool avatarOrientationChanged(); + bool avatarScaleChanged(); + bool lookAtPositionChanged(); + bool audioLoudnessChanged(); + bool sensorToWorldMatrixChanged(); + bool additionalFlagsChanged(); + + bool hasParent() { return !getParentID().isNull(); } + bool parentInfoChanged(); + + bool hasFaceTracker() { return _headData ? _headData->_isFaceTrackerConnected : false; } + bool faceTrackerInfoChanged(); + + QByteArray toByteArray_OLD(AvatarDataDetail dataDetail); + QByteArray toByteArray_NEW(AvatarDataDetail dataDetail); + + int parseDataFromBuffer_OLD(const QByteArray& buffer); + int parseDataFromBuffer_NEW(const QByteArray& buffer); + glm::vec3 _handPosition; virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; } virtual void maybeUpdateSessionDisplayNameFromTransport(const QString& sessionDisplayName) { } // No-op in AvatarMixer @@ -571,7 +685,21 @@ protected: // _globalPosition is sent along with localPosition + parent because the avatar-mixer doesn't know // where Entities are located. This is currently only used by the mixer to decide how often to send // updates about one avatar to another. 
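The hand-state constants above also drive the flags byte that survives in the new AdditionalFlags record, so it is worth spelling out how the split octal round-trips. A short worked example, mirroring the setSemiNibbleAt/getSemiNibbleAt calls used elsewhere in this file:

    // e.g. right hand pointing, with the finger-pointing style active
    char handState = RIGHT_HAND_POINTING_FLAG | IS_FINGER_POINTING_FLAG;                // == 6

    uint8_t flags = 0;
    setSemiNibbleAt(flags, HAND_STATE_START_BIT, handState & ~IS_FINGER_POINTING_FLAG); // H0,H1
    if (handState & IS_FINGER_POINTING_FLAG) {
        setAtBit(flags, HAND_STATE_FINGER_POINTING_BIT);                                // H2
    }

    // receiver reassembles the same octal value
    char unpacked = getSemiNibbleAt(flags, HAND_STATE_START_BIT)
        + (oneAtBit(flags, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0);  // == 6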
- glm::vec3 _globalPosition; + glm::vec3 _globalPosition { 0, 0, 0 }; + + glm::vec3 _lastSentGlobalPosition { 0, 0, 0 }; + glm::vec3 _lastSentLocalPosition { 0, 0, 0 }; + glm::vec3 _lastSentAvatarDimensions { 0, 0, 0 }; + glm::quat _lastSentLocalOrientation; + float _lastSentScale { 0 }; + glm::vec3 _lastSentLookAt { 0, 0, 0 }; + float _lastSentAudioLoudness { 0 }; + glm::mat4 _lastSentSensorToWorldMatrix; + uint8_t _lastSentAdditionalFlags { 0 }; + QUuid _lastSentParentID; + quint16 _lastSentParentJointIndex { -1 }; + + glm::vec3 _globalBoundingBoxCorner; mutable ReadWriteLockable _avatarEntitiesLock; diff --git a/libraries/networking/src/udt/PacketHeaders.cpp b/libraries/networking/src/udt/PacketHeaders.cpp index 8c43aa2bc4..17d78e9f3d 100644 --- a/libraries/networking/src/udt/PacketHeaders.cpp +++ b/libraries/networking/src/udt/PacketHeaders.cpp @@ -53,7 +53,7 @@ PacketVersion versionForPacketType(PacketType packetType) { case PacketType::AvatarData: case PacketType::BulkAvatarData: case PacketType::KillAvatar: - return static_cast<PacketVersion>(AvatarMixerPacketVersion::SessionDisplayName); + return static_cast<PacketVersion>(AvatarMixerPacketVersion::VariableAvatarData); case PacketType::ICEServerHeartbeat: return 18; // ICE Server Heartbeat signing case PacketType::AssetGetInfo: diff --git a/libraries/networking/src/udt/PacketHeaders.h b/libraries/networking/src/udt/PacketHeaders.h index 162e565b83..27cdf4abbe 100644 --- a/libraries/networking/src/udt/PacketHeaders.h +++ b/libraries/networking/src/udt/PacketHeaders.h @@ -207,7 +207,8 @@ enum class AvatarMixerPacketVersion : PacketVersion { SensorToWorldMat, HandControllerJoints, HasKillAvatarReason, - SessionDisplayName + SessionDisplayName, + VariableAvatarData }; enum class DomainConnectRequestVersion : PacketVersion { From 62b4a132a0d1bd1d7b615bfb9669e5f03219dfbd Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Fri, 23 Dec 2016 09:02:13 -0800 Subject: [PATCH 04/43] more hacking --- libraries/avatars/src/AvatarData.cpp | 45 ++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 9e5dc4ab96..be8a8bf87f 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -510,6 +510,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { // - this toByteArray() side-effects the AvatarData, is that safe? in particular // is it possible we'll call toByteArray() and then NOT actually use the result? + bool hasAvatarGlobalPosition = true; // always include global position bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); bool hasAvatarDimensions = sendAll || avatarDimensionsChanged(); @@ -523,20 +524,26 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChanged()); bool hasJointData = !sendMinimum; + //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; + //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; + //qDebug() << "hasAvatarOrientation:" << hasAvatarOrientation; + // Leading flags, to indicate how much data is actually included in the packet... 
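The PacketHeaders change in this chunk is the compatibility half of the new format: VariableAvatarData is appended after SessionDisplayName rather than inserted, so every existing enumerator keeps its numeric value and a higher reported version reliably means the sender understands the flag-gated layout. A purely hypothetical sketch of the kind of gate this enables (senderVersion is an illustrative variable, and no such check is part of this patch):

    bool senderUsesVariableFormat =
        senderVersion >= static_cast<PacketVersion>(AvatarMixerPacketVersion::VariableAvatarData);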
AvatarDataPacket::HasFlags packetStateFlags = - (hasAvatarGlobalPosition && AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION) - | (hasAvatarLocalPosition && AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION) - | (hasAvatarDimensions && AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS) - | (hasAvatarOrientation && AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION) - | (hasAvatarScale && AvatarDataPacket::PACKET_HAS_AVATAR_SCALE) - | (hasLookAtPosition && AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION) - | (hasAudioLoudness && AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS) - | (hasSensorToWorldMatrix && AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX) - | (hasAdditionalFlags && AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS) - | (hasParentInfo && AvatarDataPacket::PACKET_HAS_PARENT_INFO) - | (hasFaceTrackerInfo && AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO) - | (hasJointData && AvatarDataPacket::PACKET_HAS_JOINT_DATA); + (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0) + | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0) + | (hasAvatarDimensions ? AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS : 0) + | (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0) + | (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0) + | (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0) + | (hasAudioLoudness ? AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS : 0) + | (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0) + | (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0) + | (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0) + | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) + | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); + + //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); @@ -548,6 +555,8 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { data->globalPosition[2] = _globalPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); _lastSentGlobalPosition = _globalPosition; + + //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } // FIXME - I was told by tony this was "skeletal model position"-- but it seems to be @@ -1165,6 +1174,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer); _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); + //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } if (hasAvatarLocalPosition) { @@ -1179,6 +1189,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { } setLocalPosition(position); sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + //qDebug() << "hasAvatarLocalPosition position:" << position; } if (hasAvatarDimensions) { @@ -1188,6 +1199,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { // FIXME - this is suspicious looking! 
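The switch from && to ? : when assembling packetStateFlags above is a real fix rather than a style tweak: bool && HasFlags yields a bool, so each term of the old expression contributed only 0 or 1 and the OR-ed result collapsed every set flag into bit 0, the AvatarGlobalPosition bit. For example:

    bool hasAvatarOrientation = true;
    auto wrong = hasAvatarOrientation && AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION;    // bool, value 1
    auto right = hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0; // 1U << 3

so with the old expression a receiver testing PACKET_HAS_AVATAR_ORIENTATION would always have seen that bit clear.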
_globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); + //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; } if (hasAvatarOrientation) { @@ -1212,6 +1224,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { setLocalOrientation(newOrientation); } sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); + //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; } if (hasAvatarScale) { @@ -1227,6 +1240,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { } setTargetScale(scale); sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); + //qDebug() << "hasAvatarOrientation scale:" << scale; } if (hasLookAtPosition) { @@ -1241,6 +1255,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { } _headData->_lookAtPosition = lookAt; sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); + //qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -1257,6 +1272,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { } _headData->_audioLoudness = audioLoudness; sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); + //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -1270,6 +1286,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); _sensorToWorldMatrixCache.set(sensorToWorldMatrix); sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); + //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; } if (hasAdditionalFlags) { @@ -1293,6 +1310,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); + //qDebug() << "hasAdditionalFlags bitItems:" << bitItems; } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... @@ -1305,6 +1323,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); _parentID = QUuid::fromRfc4122(byteArray); _parentJointIndex = parentInfo->parentJointIndex; + //qDebug() << "hasParentInfo _parentID:" << _parentID; } else { _parentID = QUuid(); } @@ -1325,11 +1344,13 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
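The audio loudness and the sensor-to-world scale parsed in this hunk travel as two-byte fixed-point values whose radix is the number of fractional bits kept. A rough model of the encode/decode pair (an assumption about what packFloatScalarToSignedTwoByteFixed and its unpack counterpart do, not a copy of their implementation):

    // encode: keep AUDIO_LOUDNESS_RADIX fractional bits (a radix of 10 would give ~1/1024 resolution)
    int16_t fixedPoint = (int16_t)(audioLoudness * (float)(1 << AUDIO_LOUDNESS_RADIX));

    // decode: divide the same power of two back out
    float restored = (float)fixedPoint / (float)(1 << AUDIO_LOUDNESS_RADIX);

A larger radix buys precision at the cost of representable range, which is why the sensor-to-world scale carries its own SENSOR_TO_WORLD_SCALE_RADIX.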
memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); sourceBuffer += coefficientsSize; + //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; } if (hasJointData) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; + //qDebug() << "hasJointData numJoints:" << numJoints; const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); From 01d48a5e27dc4e26391ed6e5f53be3b3f23d1e4e Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Fri, 23 Dec 2016 10:18:48 -0800 Subject: [PATCH 05/43] grrr --- libraries/avatars/src/AvatarData.cpp | 47 +++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index be8a8bf87f..d5bf4ee5d1 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -510,6 +510,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { // - this toByteArray() side-effects the AvatarData, is that safe? in particular // is it possible we'll call toByteArray() and then NOT actually use the result? + sendAll = true; bool hasAvatarGlobalPosition = true; // always include global position bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); @@ -522,7 +523,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { bool hasAdditionalFlags = sendAll || additionalFlagsChanged(); bool hasParentInfo = hasParent() && (sendAll || parentInfoChanged()); bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChanged()); - bool hasJointData = !sendMinimum; + bool hasJointData = sendAll || !sendMinimum; //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; @@ -610,6 +611,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { data->lookAtPosition[2] = lookAt.z; destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); _lastSentLookAt = lookAt; + //qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -618,6 +620,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); _lastSentAudioLoudness = audioLoudness; + //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -663,6 +666,12 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { data->flags = flags; destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); _lastSentAdditionalFlags = flags; + + //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; + //qDebug() << "hasAdditionalFlags _handState:" << _handState; + //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; + //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; + //qDebug() << "hasAdditionalFlags bitItems:" << flags; } if (hasParentInfo) { @@ -695,8 +704,16 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { if (hasJointData) { QReadLocker readLock(&_jointDataLock); + int rotationSentCount = 0; + int translationSentCount = 0; + + // joint rotation data - *destinationBuffer++ = _jointData.size(); + int numJoints = 
_jointData.size(); + *destinationBuffer++ = (uint8_t)numJoints; + + //qDebug() << "hasJointData numJoints:" << numJoints; + unsigned char* validityPosition = destinationBuffer; unsigned char validity = 0; int validityBit = 0; @@ -716,7 +733,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) { if (data.rotationSet) { validity |= (1 << validityBit); -#ifdef WANT_DEBUG +#if 1 //def WANT_DEBUG rotationSentCount++; #endif } @@ -764,7 +781,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) { if (data.translationSet) { validity |= (1 << validityBit); -#ifdef WANT_DEBUG +#if 1 //def WANT_DEBUG translationSentCount++; #endif maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); @@ -807,6 +824,9 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(), TRANSLATION_COMPRESSION_RADIX); + //qDebug() << "hasJointData rotationSentCount:" << rotationSentCount << "translationSentCount:" << translationSentCount; + + #ifdef WANT_DEBUG if (sendAll) { qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll @@ -821,6 +841,8 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { #endif } + int avatarDataSize = destinationBuffer - startPosition; + //qDebug() << "avatarDataSize:" << avatarDataSize; return avatarDataByteArray.left(destinationBuffer - startPosition); } @@ -1149,6 +1171,12 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { // read the packet flags memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); + sourceBuffer += sizeof(packetStateFlags); + + //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; + //qDebug() << "buffer size:" << buffer.size(); + + #define HAS_FLAG(B,F) ((B & F) == F) @@ -1165,7 +1193,6 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { bool hasFaceTrackerInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO); bool hasJointData = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_JOINT_DATA); - sourceBuffer += sizeof(AvatarDataPacket::HasFlags); quint64 now = usecTimestampNow(); @@ -1307,10 +1334,17 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0); + _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); + //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; + //qDebug() << "hasAdditionalFlags _handState:" << _handState; + //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; + //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; + //qDebug() << "hasAdditionalFlags bitItems:" << bitItems; + sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... 
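The joint block written above is the variable-length tail of the packet: one byte for the joint count, a validity bitmap of ceil(numJoints / 8) bytes ahead of each of the rotation and translation runs, six bytes per valid entry, and finally the two faux hand joints at a fixed 6 + 6 bytes each. A worked example (63 rotations and 3 translations is just an illustrative split, not a constant of the format):

    numJoints = 63            -> 8 validity bytes per bitmap
    63 rotations sent         -> 63 * 6 = 378 bytes
     3 translations sent      ->  3 * 6 =  18 bytes
    two faux hand joints      ->  2 * (6 + 6) = 24 bytes

    total = 1 + 8 + 378 + 8 + 18 + 24 = 437 bytes

which is why joint data dwarfs anything the new flag-gated records can save.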
@@ -1434,6 +1468,9 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { // faux joints sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); + + //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations; + } int numBytesRead = sourceBuffer - startPosition; From 474cb604f1acdbb6b46833a80132d41302cb4255 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Fri, 23 Dec 2016 10:43:34 -0800 Subject: [PATCH 06/43] fix unix error, don't send all all the time --- libraries/avatars/src/AvatarData.cpp | 2 +- libraries/avatars/src/AvatarData.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index d5bf4ee5d1..3139807b09 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -510,7 +510,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { // - this toByteArray() side-effects the AvatarData, is that safe? in particular // is it possible we'll call toByteArray() and then NOT actually use the result? - sendAll = true; + //sendAll = true; bool hasAvatarGlobalPosition = true; // always include global position bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index de34df4331..c055c2af5a 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -697,7 +697,7 @@ protected: glm::mat4 _lastSentSensorToWorldMatrix; uint8_t _lastSentAdditionalFlags { 0 }; QUuid _lastSentParentID; - quint16 _lastSentParentJointIndex { -1 }; + quint16 _lastSentParentJointIndex { 0 }; glm::vec3 _globalBoundingBoxCorner; From be61052368e43543b9207c42f2a360ecd50b3912 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Mon, 26 Dec 2016 12:12:23 -0800 Subject: [PATCH 07/43] check point --- libraries/avatars/src/AvatarData.cpp | 557 ++------------------------- libraries/avatars/src/AvatarData.h | 6 - 2 files changed, 29 insertions(+), 534 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 3139807b09..ca16854bd8 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -148,281 +148,6 @@ void AvatarData::setHandPosition(const glm::vec3& handPosition) { _handPosition = glm::inverse(getOrientation()) * (handPosition - getPosition()); } -QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { - return toByteArray_NEW(dataDetail); -} - -QByteArray AvatarData::toByteArray_OLD(AvatarDataDetail dataDetail) { - bool cullSmallChanges = (dataDetail == CullSmallData); - bool sendAll = (dataDetail == SendAllData); - bool sendMinimum = (dataDetail == MinimumData); - // TODO: DRY this up to a shared method - // that can pack any type given the number of bytes - // and return the number of bytes to push the pointer - - // lazily allocate memory for HeadData in case we're not an Avatar instance - if (!_headData) { - _headData = new HeadData(this); - } - if (_forceFaceTrackerConnected) { - _headData->_isFaceTrackerConnected = true; - } - - QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); - - unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); - unsigned char* 
startPosition = destinationBuffer; - - // Leading flags, to indicate how much data is actually included in the packet... - uint8_t packetStateFlags = 0; - if (sendMinimum) { - setAtBit(packetStateFlags, AVATARDATA_FLAGS_MINIMUM); - } - - memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); - destinationBuffer += sizeof(packetStateFlags); - - if (sendMinimum) { - memcpy(destinationBuffer, &_globalPosition, sizeof(_globalPosition)); - destinationBuffer += sizeof(_globalPosition); - } else { - auto avatarInfo = reinterpret_cast<AvatarDataPacket::AvatarInfo*>(destinationBuffer); - avatarInfo->globalPosition[0] = _globalPosition.x; - avatarInfo->globalPosition[1] = _globalPosition.y; - avatarInfo->globalPosition[2] = _globalPosition.z; - - avatarInfo->position[0] = getLocalPosition().x; - avatarInfo->position[1] = getLocalPosition().y; - avatarInfo->position[2] = getLocalPosition().z; - - avatarInfo->globalBoundingBoxCorner[0] = getPosition().x - _globalBoundingBoxCorner.x; - avatarInfo->globalBoundingBoxCorner[1] = getPosition().y - _globalBoundingBoxCorner.y; - avatarInfo->globalBoundingBoxCorner[2] = getPosition().z - _globalBoundingBoxCorner.z; - - glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(getLocalOrientation())); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 0), bodyEulerAngles.y); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 1), bodyEulerAngles.x); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 2), bodyEulerAngles.z); - packFloatRatioToTwoByte((uint8_t*)(&avatarInfo->scale), getDomainLimitedScale()); - avatarInfo->lookAtPosition[0] = _headData->_lookAtPosition.x; - avatarInfo->lookAtPosition[1] = _headData->_lookAtPosition.y; - avatarInfo->lookAtPosition[2] = _headData->_lookAtPosition.z; - - packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->audioLoudness, - glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS), AUDIO_LOUDNESS_RADIX); - - glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix(); - packOrientationQuatToSixBytes(avatarInfo->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); - glm::vec3 scale = extractScale(sensorToWorldMatrix); - packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX); - avatarInfo->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0]; - avatarInfo->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; - avatarInfo->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; - - setSemiNibbleAt(avatarInfo->flags, KEY_STATE_START_BIT, _keyState); - // hand state - bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG; - setSemiNibbleAt(avatarInfo->flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG); - if (isFingerPointing) { - setAtBit(avatarInfo->flags, HAND_STATE_FINGER_POINTING_BIT); - } - // faceshift state - if (_headData->_isFaceTrackerConnected) { - setAtBit(avatarInfo->flags, IS_FACESHIFT_CONNECTED); - } - // eye tracker state - if (_headData->_isEyeTrackerConnected) { - setAtBit(avatarInfo->flags, IS_EYE_TRACKER_CONNECTED); - } - // referential state - QUuid parentID = getParentID(); - if (!parentID.isNull()) { - setAtBit(avatarInfo->flags, HAS_REFERENTIAL); - } - destinationBuffer += sizeof(AvatarDataPacket::AvatarInfo); - - #if 0 // debugging - #define COMPARE_MEMBER_V3(L, R, M) { if (L.M[0] != R.M[0] || L.M[1] != R.M[1] || L.M[2] != R.M[2]) { qCDebug(avatars) << #M " changed - old:" << "{" << L.M[0] << "," << L.M[1] << ", " << L.M[2] << "}" << " new:" "{" << 
R.M[0] << "," << R.M[1] << ", " << R.M[2] << "}"; } } - #define COMPARE_MEMBER_F(L, R, M) { if (L.M != R.M) { qCDebug(avatars) << #M " changed - old:" << L.M << " new:" << R.M; } } - - qCDebug(avatars) << "--------------"; - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), position); - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), globalPosition); - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), globalBoundingBoxCorner); - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), localOrientation); - COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), scale); - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), lookAtPosition); - COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), audioLoudness); - - if (_lastSensorToWorldMatrix != sensorToWorldMatrix) { - qCDebug(avatars) << "sensorToWorldMatrix changed - old:" << _lastSensorToWorldMatrix << "new:" << sensorToWorldMatrix; - } - //COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), sensorToWorldQuat); - COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), sensorToWorldScale); - COMPARE_MEMBER_V3(_lastAvatarInfo, (*avatarInfo), sensorToWorldTrans); - COMPARE_MEMBER_F(_lastAvatarInfo, (*avatarInfo), flags); - - memcpy(&_lastAvatarInfo, avatarInfo, sizeof(_lastAvatarInfo)); - _lastSensorToWorldMatrix = sensorToWorldMatrix; - - #endif - - - if (!parentID.isNull()) { - auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer); - QByteArray referentialAsBytes = parentID.toRfc4122(); - memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); - parentInfo->parentJointIndex = _parentJointIndex; - destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); - } - - // If it is connected, pack up the data - if (_headData->_isFaceTrackerConnected) { - auto faceTrackerInfo = reinterpret_cast<AvatarDataPacket::FaceTrackerInfo*>(destinationBuffer); - - faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink; - faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink; - faceTrackerInfo->averageLoudness = _headData->_averageLoudness; - faceTrackerInfo->browAudioLift = _headData->_browAudioLift; - faceTrackerInfo->numBlendshapeCoefficients = _headData->_blendshapeCoefficients.size(); - destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); - - // followed by a variable number of float coefficients - memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); - destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); - } - - QReadLocker readLock(&_jointDataLock); - - // joint rotation data - *destinationBuffer++ = _jointData.size(); - unsigned char* validityPosition = destinationBuffer; - unsigned char validity = 0; - int validityBit = 0; - - #ifdef WANT_DEBUG - int rotationSentCount = 0; - unsigned char* beforeRotations = destinationBuffer; - #endif - - _lastSentJointData.resize(_jointData.size()); - - for (int i=0; i < _jointData.size(); i++) { - const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].rotation != data.rotation) { - if (sendAll || - !cullSmallChanges || - fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) { - if (data.rotationSet) { - validity |= (1 << validityBit); - #ifdef WANT_DEBUG - rotationSentCount++; - #endif - } - } - } - if (++validityBit == BITS_IN_BYTE) { - *destinationBuffer++ = validity; - validityBit = validity = 0; - } - } - if (validityBit != 0) { - *destinationBuffer++ = validity; - } - - validityBit = 0; - validity = 
*validityPosition++; - for (int i = 0; i < _jointData.size(); i ++) { - const JointData& data = _jointData[i]; - if (validity & (1 << validityBit)) { - destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation); - } - if (++validityBit == BITS_IN_BYTE) { - validityBit = 0; - validity = *validityPosition++; - } - } - - - // joint translation data - validityPosition = destinationBuffer; - validity = 0; - validityBit = 0; - - #ifdef WANT_DEBUG - int translationSentCount = 0; - unsigned char* beforeTranslations = destinationBuffer; - #endif - - float maxTranslationDimension = 0.0; - for (int i=0; i < _jointData.size(); i++) { - const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].translation != data.translation) { - if (sendAll || - !cullSmallChanges || - glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) { - if (data.translationSet) { - validity |= (1 << validityBit); - #ifdef WANT_DEBUG - translationSentCount++; - #endif - maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); - maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension); - maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension); - } - } - } - if (++validityBit == BITS_IN_BYTE) { - *destinationBuffer++ = validity; - validityBit = validity = 0; - } - } - - if (validityBit != 0) { - *destinationBuffer++ = validity; - } - - validityBit = 0; - validity = *validityPosition++; - for (int i = 0; i < _jointData.size(); i ++) { - const JointData& data = _jointData[i]; - if (validity & (1 << validityBit)) { - destinationBuffer += - packFloatVec3ToSignedTwoByteFixed(destinationBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); - } - if (++validityBit == BITS_IN_BYTE) { - validityBit = 0; - validity = *validityPosition++; - } - } - - // faux joints - Transform controllerLeftHandTransform = Transform(getControllerLeftHandMatrix()); - destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerLeftHandTransform.getRotation()); - destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerLeftHandTransform.getTranslation(), - TRANSLATION_COMPRESSION_RADIX); - Transform controllerRightHandTransform = Transform(getControllerRightHandMatrix()); - destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerRightHandTransform.getRotation()); - destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(), - TRANSLATION_COMPRESSION_RADIX); - - #ifdef WANT_DEBUG - if (sendAll) { - qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll - << "rotations:" << rotationSentCount << "translations:" << translationSentCount - << "largest:" << maxTranslationDimension - << "size:" - << (beforeRotations - startPosition) << "+" - << (beforeTranslations - beforeRotations) << "+" - << (destinationBuffer - beforeTranslations) << "=" - << (destinationBuffer - startPosition); - } - #endif - } - - return avatarDataByteArray.left(destinationBuffer - startPosition); -} - void AvatarData::lazyInitHeadData() { // lazily allocate memory for HeadData in case we're not an Avatar instance if (!_headData) { @@ -475,7 +200,7 @@ bool AvatarData::faceTrackerInfoChanged() { return true; // FIXME! 
} -QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { +QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { bool cullSmallChanges = (dataDetail == CullSmallData); bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); @@ -510,7 +235,32 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { // - this toByteArray() side-effects the AvatarData, is that safe? in particular // is it possible we'll call toByteArray() and then NOT actually use the result? - //sendAll = true; + // TODO - + // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp + // + // 1) make the dimensions really be dimensions instead of corner - 12bytes - 4.3kbps + // 2) determine if local position really only matters for parent - 12bytes - 4.3kbps + // 3) AdditionalFlags - only send if changed - 1byte - 0.36 kpbs + // 4) SensorToWorld - should we only send this for avatars with attachments?? - 20bytes - 7.2kbps + // + // ----- Subtotal -- non-joint savings --- 16.2kbps --- ~12% savings? + // + // Joints -- + // 63 rotations * 6 bytes = 136kbps + // 3 translations * 6 bytes = 6.48kbps + // + // FIXME + // - if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... + // this is an iFrame issue... what to do about that? + // + // - probably - if the avatar was out of view, then came in view, it would also not correctly do an iFrame + // + // - in the AvatarMixer, there's a single AvatarData per connected avatar, that means that this + // "last sent" strategy, actually won't work, because the serialization of the byte array will + // iterate through a bunch of avatars in a loop, the first one will get the full data, then + // the others will be partial. + // we need some way of keeping track of what was sent the last time. + bool hasAvatarGlobalPosition = true; // always include global position bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); @@ -544,7 +294,7 @@ QByteArray AvatarData::toByteArray_NEW(AvatarDataDetail dataDetail) { | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); - //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; + qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); @@ -911,255 +661,6 @@ const unsigned char* unpackFauxJoint(const unsigned char* sourceBuffer, ThreadSa // read data in packet starting at byte offset and return number of bytes parsed int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { - return parseDataFromBuffer_NEW(buffer); -} - -// read data in packet starting at byte offset and return number of bytes parsed -int AvatarData::parseDataFromBuffer_OLD(const QByteArray& buffer) { - // lazily allocate memory for HeadData in case we're not an Avatar instance - if (!_headData) { - _headData = new HeadData(this); - } - - uint8_t packetStateFlags = buffer.at(0); - bool minimumSent = oneAtBit(packetStateFlags, AVATARDATA_FLAGS_MINIMUM); - - const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data()); - const unsigned char* endPosition = startPosition + buffer.size(); - const unsigned char* sourceBuffer = startPosition + sizeof(packetStateFlags); // skip the flags!! 
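The kbps figures in the TODO above are mutually consistent with a single assumed send rate of roughly 45 avatar packets per second; that rate is an inference from the arithmetic, not something stated in the patch:

    12 bytes * 8 bits * 45/s = 4320 b/s ~ 4.3 kbps   (dimensions, or local position)
    20 bytes * 8 bits * 45/s = 7200 b/s = 7.2 kbps   (sensor-to-world matrix)
     1 byte  * 8 bits * 45/s =  360 b/s ~ 0.36 kbps  (additional flags)
    63 rotations * 6 bytes * 8 bits * 45/s ~ 136 kbps
     3 translations * 6 bytes * 8 bits * 45/s ~ 6.5 kbps

so the four non-joint items really do total the quoted ~16.2 kbps, and joint rotations remain the dominant cost by far.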
- - // if this is the minimum, then it only includes the flags - if (minimumSent) { - memcpy(&_globalPosition, sourceBuffer, sizeof(_globalPosition)); - sourceBuffer += sizeof(_globalPosition); - int numBytesRead = (sourceBuffer - startPosition); - _averageBytesReceived.updateAverage(numBytesRead); - return numBytesRead; - } - - quint64 now = usecTimestampNow(); - - PACKET_READ_CHECK(AvatarInfo, sizeof(AvatarDataPacket::AvatarInfo)); - auto avatarInfo = reinterpret_cast<const AvatarDataPacket::AvatarInfo*>(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::AvatarInfo); - - _globalPosition = glm::vec3(avatarInfo->globalPosition[0], avatarInfo->globalPosition[1], avatarInfo->globalPosition[2]); - glm::vec3 position = glm::vec3(avatarInfo->position[0], avatarInfo->position[1], avatarInfo->position[2]); - _globalBoundingBoxCorner = glm::vec3(avatarInfo->globalBoundingBoxCorner[0], avatarInfo->globalBoundingBoxCorner[1], avatarInfo->globalBoundingBoxCorner[2]); - if (isNaN(position)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - setLocalPosition(position); - - float pitch, yaw, roll; - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 0, &yaw); - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 1, &pitch); - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 2, &roll); - if (isNaN(yaw) || isNaN(pitch) || isNaN(roll)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: localOriention is NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - - glm::quat currentOrientation = getLocalOrientation(); - glm::vec3 newEulerAngles(pitch, yaw, roll); - glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles)); - if (currentOrientation != newOrientation) { - _hasNewJointRotations = true; - setLocalOrientation(newOrientation); - } - - float scale; - unpackFloatRatioFromTwoByte((uint8_t*)&avatarInfo->scale, scale); - if (isNaN(scale)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: scale NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - setTargetScale(scale); - - glm::vec3 lookAt = glm::vec3(avatarInfo->lookAtPosition[0], avatarInfo->lookAtPosition[1], avatarInfo->lookAtPosition[2]); - if (isNaN(lookAt)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: lookAtPosition is NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - _headData->_lookAtPosition = lookAt; - - - float audioLoudness; - unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&avatarInfo->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); - - // FIXME - is this really needed? 
- if (isNaN(audioLoudness)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - _headData->_audioLoudness = audioLoudness; - - glm::quat sensorToWorldQuat; - unpackOrientationQuatFromSixBytes(avatarInfo->sensorToWorldQuat, sensorToWorldQuat); - float sensorToWorldScale; - unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&avatarInfo->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX); - glm::vec3 sensorToWorldTrans(avatarInfo->sensorToWorldTrans[0], avatarInfo->sensorToWorldTrans[1], avatarInfo->sensorToWorldTrans[2]); - glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); - - _sensorToWorldMatrixCache.set(sensorToWorldMatrix); - - { // bitFlags and face data - uint8_t bitItems = avatarInfo->flags; - - // key state, stored as a semi-nibble in the bitItems - _keyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); - - // hand state, stored as a semi-nibble plus a bit in the bitItems - // we store the hand state as well as other items in a shared bitset. The hand state is an octal, but is split - // into two sections to maintain backward compatibility. The bits are ordered as such (0-7 left to right). - // +---+-----+-----+--+ - // |x,x|H0,H1|x,x,x|H2| - // +---+-----+-----+--+ - // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits - _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) - + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0); - - _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); - _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); - bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL); - - if (hasReferential) { - PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo)); - auto parentInfo = reinterpret_cast<const AvatarDataPacket::ParentInfo*>(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); - - QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); - _parentID = QUuid::fromRfc4122(byteArray); - _parentJointIndex = parentInfo->parentJointIndex; - } else { - _parentID = QUuid(); - } - - if (_headData->_isFaceTrackerConnected) { - PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo)); - auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); - - _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink; - _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink; - _headData->_averageLoudness = faceTrackerInfo->averageLoudness; - _headData->_browAudioLift = faceTrackerInfo->browAudioLift; - - int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients; - const int coefficientsSize = sizeof(float) * numCoefficients; - PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize); - _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
- memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); - sourceBuffer += coefficientsSize; - } - } - - PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); - int numJoints = *sourceBuffer++; - - const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); - PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); - - int numValidJointRotations = 0; - QVector<bool> validRotations; - validRotations.resize(numJoints); - { // rotation validity bits - unsigned char validity = 0; - int validityBit = 0; - for (int i = 0; i < numJoints; i++) { - if (validityBit == 0) { - validity = *sourceBuffer++; - } - bool valid = (bool)(validity & (1 << validityBit)); - if (valid) { - ++numValidJointRotations; - } - validRotations[i] = valid; - validityBit = (validityBit + 1) % BITS_IN_BYTE; - } - } - - // each joint rotation is stored in 6 bytes. - QWriteLocker writeLock(&_jointDataLock); - _jointData.resize(numJoints); - - const int COMPRESSED_QUATERNION_SIZE = 6; - PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE); - for (int i = 0; i < numJoints; i++) { - JointData& data = _jointData[i]; - if (validRotations[i]) { - sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation); - _hasNewJointRotations = true; - data.rotationSet = true; - } - } - - PACKET_READ_CHECK(JointTranslationValidityBits, bytesOfValidity); - - // get translation validity bits -- these indicate which translations were packed - int numValidJointTranslations = 0; - QVector<bool> validTranslations; - validTranslations.resize(numJoints); - { // translation validity bits - unsigned char validity = 0; - int validityBit = 0; - for (int i = 0; i < numJoints; i++) { - if (validityBit == 0) { - validity = *sourceBuffer++; - } - bool valid = (bool)(validity & (1 << validityBit)); - if (valid) { - ++numValidJointTranslations; - } - validTranslations[i] = valid; - validityBit = (validityBit + 1) % BITS_IN_BYTE; - } - } // 1 + bytesOfValidity bytes - - // each joint translation component is stored in 6 bytes. 
- const int COMPRESSED_TRANSLATION_SIZE = 6; - PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE); - - for (int i = 0; i < numJoints; i++) { - JointData& data = _jointData[i]; - if (validTranslations[i]) { - sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); - _hasNewJointTranslations = true; - data.translationSet = true; - } - } - - #ifdef WANT_DEBUG - if (numValidJointRotations > 15) { - qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations - << "translations:" << numValidJointTranslations - << "size:" << (int)(sourceBuffer - startPosition); - } - #endif - - // faux joints - sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); - sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); - - int numBytesRead = sourceBuffer - startPosition; - _averageBytesReceived.updateAverage(numBytesRead); - return numBytesRead; -} - - -// read data in packet starting at byte offset and return number of bytes parsed -int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { // lazily allocate memory for HeadData in case we're not an Avatar instance lazyInitHeadData(); @@ -1173,7 +674,7 @@ int AvatarData::parseDataFromBuffer_NEW(const QByteArray& buffer) { memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); sourceBuffer += sizeof(packetStateFlags); - //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; + qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; //qDebug() << "buffer size:" << buffer.size(); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index c055c2af5a..4bd905bd69 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -620,12 +620,6 @@ protected: bool hasFaceTracker() { return _headData ? 
_headData->_isFaceTrackerConnected : false; } bool faceTrackerInfoChanged(); - QByteArray toByteArray_OLD(AvatarDataDetail dataDetail); - QByteArray toByteArray_NEW(AvatarDataDetail dataDetail); - - int parseDataFromBuffer_OLD(const QByteArray& buffer); - int parseDataFromBuffer_NEW(const QByteArray& buffer); - glm::vec3 _handPosition; virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; } virtual void maybeUpdateSessionDisplayNameFromTransport(const QString& sessionDisplayName) { } // No-op in AvatarMixer From 73bfc069da0baa0a21fe6f2d0269b4fc39245c9a Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Tue, 27 Dec 2016 17:19:55 -0800 Subject: [PATCH 08/43] more work on only sending changes --- assignment-client/src/avatars/AvatarMixer.cpp | 4 +- .../src/avatars/AvatarMixerClientData.h | 13 ++ interface/src/Application.cpp | 1 + interface/src/avatar/MyAvatar.cpp | 6 +- interface/src/avatar/MyAvatar.h | 2 +- libraries/avatars/src/AvatarData.cpp | 144 ++++++++++-------- libraries/avatars/src/AvatarData.h | 52 +++---- libraries/avatars/src/HeadData.h | 22 ++- libraries/shared/src/SpatiallyNestable.cpp | 22 ++- libraries/shared/src/SpatiallyNestable.h | 7 + 10 files changed, 174 insertions(+), 99 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index f052bb3a53..bdd5db9559 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -400,7 +400,9 @@ void AvatarMixer::broadcastAvatarData() { numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); //qDebug() << "about to write data for:" << otherNode->getUUID(); - numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail)); + quint64 lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); + qDebug() << "about to write data for:" << otherNode->getUUID() << "last encoded at:" << lastEncodeForOther; + numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail, lastEncodeForOther)); avatarPacketList->endSegment(); }); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index 78a30d8206..3231a7c944 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -102,6 +102,15 @@ public: const QString& getBaseDisplayName() { return _baseDisplayName; } void setBaseDisplayName(const QString& baseDisplayName) { _baseDisplayName = baseDisplayName; } + quint64 getLastOtherAvatarEncodeTime(QUuid otherAvatar) { + quint64 result = 0; + if (_lastOtherAvatarEncodeTime.find(otherAvatar) != _lastOtherAvatarEncodeTime.end()) { + result = _lastOtherAvatarEncodeTime[otherAvatar]; + } + _lastOtherAvatarEncodeTime[otherAvatar] = usecTimestampNow(); + return result; + } + private: AvatarSharedPointer _avatar { new AvatarData() }; @@ -109,6 +118,10 @@ private: std::unordered_map<QUuid, uint16_t> _lastBroadcastSequenceNumbers; std::unordered_set<QUuid> _hasReceivedFirstPacketsFrom; + // this is a map of the last time we encoded an "other" avatar for + // sending to "this" node + std::unordered_map<QUuid, quint64> _lastOtherAvatarEncodeTime; + HRCTime _identityChangeTimestamp; bool _gotIdentity { false }; diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index b4b0ad10bb..96e9f2f498 100644 --- a/interface/src/Application.cpp +++ 
b/interface/src/Application.cpp @@ -5068,6 +5068,7 @@ void Application::nodeAdded(SharedNodePointer node) const { if (node->getType() == NodeType::AvatarMixer) { // new avatar mixer, send off our identity packet right away getMyAvatar()->sendIdentityPacket(); + getMyAvatar()->resetLastSent(); } } diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index eebcee8e4c..40cc46b272 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -226,7 +226,7 @@ void MyAvatar::simulateAttachments(float deltaTime) { // don't update attachments here, do it in harvestResultsFromPhysicsSimulation() } -QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail) { +QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) { CameraMode mode = qApp->getCamera()->getMode(); _globalPosition = getPosition(); _globalBoundingBoxCorner.x = _characterController.getCapsuleRadius(); @@ -237,12 +237,12 @@ QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail) { // fake the avatar position that is sent up to the AvatarMixer glm::vec3 oldPosition = getPosition(); setPosition(getSkeletonPosition()); - QByteArray array = AvatarData::toByteArray(dataDetail); + QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime); // copy the correct position back setPosition(oldPosition); return array; } - return AvatarData::toByteArray(dataDetail); + return AvatarData::toByteArray(dataDetail, lastSentTime); } void MyAvatar::centerBody() { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 0e5ce0fe7b..68e65faad7 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -333,7 +333,7 @@ private: glm::vec3 getWorldBodyPosition() const; glm::quat getWorldBodyOrientation() const; - QByteArray toByteArray(AvatarDataDetail dataDetail) override; + QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) override; void simulate(float deltaTime); void updateFromTrackers(float deltaTime); virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPositio) override; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index ca16854bd8..450c53a329 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -132,6 +132,7 @@ float AvatarData::getTargetScale() const { void AvatarData::setTargetScale(float targetScale) { _targetScale = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); + _scaleChanged = usecTimestampNow(); } void AvatarData::setTargetScaleVerbose(float targetScale) { @@ -159,48 +160,47 @@ void AvatarData::lazyInitHeadData() { } -bool AvatarData::avatarLocalPositionChanged() { - return _lastSentLocalPosition != getLocalPosition(); +bool AvatarData::avatarDimensionsChangedSince(quint64 time) { + return _avatarDimensionsChanged >= time; } -bool AvatarData::avatarDimensionsChanged() { - auto avatarDimensions = getPosition() - _globalBoundingBoxCorner; - return _lastSentAvatarDimensions != avatarDimensions; +bool AvatarData::avatarScaleChangedSince(quint64 time) { + return _avatarScaleChanged >= time; } -bool AvatarData::avatarOrientationChanged() { - return _lastSentLocalOrientation != getLocalOrientation(); +bool AvatarData::lookAtPositionChangedSince(quint64 time) { + return _headData->lookAtPositionChangedSince(time); } -bool AvatarData::avatarScaleChanged() { - return _lastSentScale != getDomainLimitedScale(); +bool AvatarData::audioLoudnessChangedSince(quint64 
time) { + return _headData->audioLoudnessChangedSince(time); } -bool AvatarData::lookAtPositionChanged() { - return _lastSentLookAt != _headData->_lookAtPosition; +bool AvatarData::sensorToWorldMatrixChangedSince(quint64 time) { + return _sensorToWorldMatrixChanged >= time; } -bool AvatarData::audioLoudnessChanged() { - return _lastSentAudioLoudness != glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS); -} - -bool AvatarData::sensorToWorldMatrixChanged() { - return _lastSentSensorToWorldMatrix != getSensorToWorldMatrix(); -} - -bool AvatarData::additionalFlagsChanged() { +bool AvatarData::additionalFlagsChangedSince(quint64 time) { return true; // FIXME! } -bool AvatarData::parentInfoChanged() { - return (_lastSentParentID != getParentID()) || (_lastSentParentJointIndex != _parentJointIndex); +bool AvatarData::parentInfoChangedSince(quint64 time) { + return _parentChanged >= time; } -bool AvatarData::faceTrackerInfoChanged() { +bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { return true; // FIXME! } -QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { +QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) { + + // if no timestamp was included, then assume the avatarData is single instance + // and is tracking its own last encoding time. + if (lastSentTime == 0) { + lastSentTime = _lastToByteArray; + _lastToByteArray = usecTimestampNow(); + } + bool cullSmallChanges = (dataDetail == CullSmallData); bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); @@ -261,18 +261,25 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { // the others will be partial. // we need some way of keeping track of what was sent the last time. + // AvatarDataRegulator + // .lastSent = time + // + // hasAvatarGlobalPosition = (globalPositionChanged > lastSent) + // hasAvatarLocalPosition = (localPositionChanged > lastSent) + // ... 
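Together with the getLastOtherAvatarEncodeTime() map added to AvatarMixerClientData earlier in this patch, these *ChangedSince(quint64) predicates swap the _lastSent* snapshots for per-field change timestamps, which is how the "single AvatarData per connected avatar" problem called out in the TODO is meant to be addressed: each receiver compares against its own last-encode time instead of one shared copy of the last-sent values. A condensed sketch of the flow, using only names introduced by this patch:

    // setter side: every mutation stamps its field
    void AvatarData::setTargetScale(float targetScale) {
        _targetScale = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE);
        _scaleChanged = usecTimestampNow();
    }

    // mixer side: remember when this avatar was last encoded for this particular receiver
    quint64 lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID());
    numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail, lastEncodeForOther));

    // encode side (inside toByteArray, where that time arrives as lastSentTime):
    // include a record only if it changed since this receiver last saw it
    bool hasAvatarScale = sendAll || avatarScaleChangedSince(lastSentTime);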
bool hasAvatarGlobalPosition = true; // always include global position - bool hasAvatarLocalPosition = sendAll || avatarLocalPositionChanged(); - bool hasAvatarDimensions = sendAll || avatarDimensionsChanged(); - bool hasAvatarOrientation = sendAll || avatarOrientationChanged(); - bool hasAvatarScale = sendAll || avatarScaleChanged(); - bool hasLookAtPosition = sendAll || lookAtPositionChanged(); - bool hasAudioLoudness = sendAll || audioLoudnessChanged(); - bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChanged(); - bool hasAdditionalFlags = sendAll || additionalFlagsChanged(); - bool hasParentInfo = hasParent() && (sendAll || parentInfoChanged()); - bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChanged()); + bool hasAvatarLocalPosition = sendAll || tranlationChangedSince(lastSentTime); + bool hasAvatarOrientation = sendAll || rotationChangedSince(lastSentTime); + + bool hasAvatarDimensions = sendAll || avatarDimensionsChangedSince(lastSentTime); + bool hasAvatarScale = sendAll || avatarScaleChangedSince(lastSentTime); + bool hasLookAtPosition = sendAll || lookAtPositionChangedSince(lastSentTime); + bool hasAudioLoudness = sendAll || audioLoudnessChangedSince(lastSentTime); + bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChangedSince(lastSentTime); + bool hasAdditionalFlags = sendAll || additionalFlagsChangedSince(lastSentTime); + bool hasParentInfo = hasParent() && (sendAll || parentInfoChangedSince(lastSentTime)); + bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; @@ -294,7 +301,12 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); - qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; + qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "lastSentTime:" << lastSentTime; + qDebug() << "..." << "tranlationChangedSince():" << tranlationChangedSince(lastSentTime); + qDebug() << "..." << "rotationChangedSince():" << rotationChangedSince(lastSentTime); + qDebug() << "..." << "lookAtPositionChangedSince():" << lookAtPositionChangedSince(lastSentTime); + qDebug() << "..." << "audioLoudnessChangedSince():" << audioLoudnessChangedSince(lastSentTime); + qDebug() << "..." << "parentInfoChangedSince():" << parentInfoChangedSince(lastSentTime); memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); @@ -305,8 +317,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { data->globalPosition[1] = _globalPosition.y; data->globalPosition[2] = _globalPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); - _lastSentGlobalPosition = _globalPosition; - //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } @@ -321,17 +331,18 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { data->localPosition[1] = localPosition.y; data->localPosition[2] = localPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - _lastSentLocalPosition = localPosition; } if (hasAvatarDimensions) { auto data = reinterpret_cast<AvatarDataPacket::AvatarDimensions*>(destinationBuffer); + + // FIXME - make this just dimensions!!! 
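The FIXME above and the "suspicious looking" one on the parse side are the same issue seen from both ends: the 12 bytes sent under the name avatarDimensions are still getPosition() - _globalBoundingBoxCorner, and the receiver writes them straight back into _globalBoundingBoxCorner, so nothing on the wire is a true size vector yet. Condensed from the surrounding code:

    // send side (toByteArray)
    auto avatarDimensions = getPosition() - _globalBoundingBoxCorner;        // a corner offset, not a size
    // receive side (parseDataFromBuffer)
    _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0],
                                         data->avatarDimensions[1],
                                         data->avatarDimensions[2]);         // stored back under the old name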
auto avatarDimensions = getPosition() - _globalBoundingBoxCorner; data->avatarDimensions[0] = avatarDimensions.x; data->avatarDimensions[1] = avatarDimensions.y; data->avatarDimensions[2] = avatarDimensions.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - _lastSentAvatarDimensions = avatarDimensions; + qDebug() << "hasAvatarDimensions avatarDimensions:" << avatarDimensions; } if (hasAvatarOrientation) { @@ -342,7 +353,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - _lastSentLocalOrientation = localOrientation; + qDebug() << "hasAvatarOrientation bodyEulerAngles:" << bodyEulerAngles; } if (hasAvatarScale) { @@ -350,27 +361,25 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { auto scale = getDomainLimitedScale(); packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale); destinationBuffer += sizeof(AvatarDataPacket::AvatarScale); - _lastSentScale = scale; + qDebug() << "hasAvatarScale scale:" << scale; } if (hasLookAtPosition) { auto data = reinterpret_cast<AvatarDataPacket::LookAtPosition*>(destinationBuffer); - auto lookAt = _headData->_lookAtPosition; + auto lookAt = _headData->getLookAtPosition(); data->lookAtPosition[0] = lookAt.x; data->lookAtPosition[1] = lookAt.y; data->lookAtPosition[2] = lookAt.z; destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); - _lastSentLookAt = lookAt; - //qDebug() << "hasLookAtPosition lookAt:" << lookAt; + qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - auto audioLoudness = glm::min(_headData->_audioLoudness, MAX_AUDIO_LOUDNESS); + auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS); packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); - _lastSentAudioLoudness = audioLoudness; - //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; + qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -383,7 +392,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - _lastSentSensorToWorldMatrix = sensorToWorldMatrix; + qDebug() << "hasSensorToWorldMatrix..."; } QUuid parentID = getParentID(); @@ -415,7 +424,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { } data->flags = flags; destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); - _lastSentAdditionalFlags = flags; //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; //qDebug() << "hasAdditionalFlags _handState:" << _handState; @@ -430,8 +438,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); parentInfo->parentJointIndex = _parentJointIndex; destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); - _lastSentParentID = parentID; - _lastSentParentJointIndex = _parentJointIndex; } // If it is connected, pack up the data @@ -701,8 +707,9 @@ int 
AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer); _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); + _globalPositionChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); - //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; + qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } if (hasAvatarLocalPosition) { @@ -717,7 +724,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setLocalPosition(position); sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - //qDebug() << "hasAvatarLocalPosition position:" << position; + qDebug() << "hasAvatarLocalPosition position:" << position; } if (hasAvatarDimensions) { @@ -726,8 +733,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { // FIXME - this is suspicious looking! _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); + _avatarDimensionsChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; + qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; } if (hasAvatarOrientation) { @@ -752,7 +760,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { setLocalOrientation(newOrientation); } sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; + qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; } if (hasAvatarScale) { @@ -768,7 +776,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setTargetScale(scale); sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); - //qDebug() << "hasAvatarOrientation scale:" << scale; + qDebug() << "hasAvatarOrientation scale:" << scale; } if (hasLookAtPosition) { @@ -781,9 +789,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } return buffer.size(); } - _headData->_lookAtPosition = lookAt; + _headData->setLookAtPosition(lookAt); sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); - //qDebug() << "hasLookAtPosition lookAt:" << lookAt; + qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -798,9 +806,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } return buffer.size(); } - _headData->_audioLoudness = audioLoudness; + _headData->setAudioLoudness(audioLoudness); sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); - //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; + qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -813,8 +821,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { glm::vec3 sensorToWorldTrans(data->sensorToWorldTrans[0], data->sensorToWorldTrans[1], data->sensorToWorldTrans[2]); glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); _sensorToWorldMatrixCache.set(sensorToWorldMatrix); + _sensorToWorldMatrixChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - //qDebug() << "hasSensorToWorldMatrix 
sensorToWorldMatrix:" << sensorToWorldMatrix; + qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; } if (hasAdditionalFlags) { @@ -844,8 +853,10 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; - //qDebug() << "hasAdditionalFlags bitItems:" << bitItems; + qDebug() << "hasAdditionalFlags bitItems:" << bitItems; sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); + + _additionalFlagsChanged = usecTimestampNow(); } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... @@ -858,8 +869,11 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); _parentID = QUuid::fromRfc4122(byteArray); _parentJointIndex = parentInfo->parentJointIndex; - //qDebug() << "hasParentInfo _parentID:" << _parentID; + qDebug() << "hasParentInfo _parentID:" << _parentID; + _parentChanged = usecTimestampNow(); + } else { + // FIXME - this aint totally right, for switching to parent/no-parent _parentID = QUuid(); } @@ -879,13 +893,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); sourceBuffer += coefficientsSize; - //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; + qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; } if (hasJointData) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; - //qDebug() << "hasJointData numJoints:" << numJoints; + qDebug() << "hasJointData numJoints:" << numJoints; const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 4bd905bd69..3ebe196ce0 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -412,7 +412,7 @@ public: SendAllData } AvatarDataDetail; - virtual QByteArray toByteArray(AvatarDataDetail dataDetail); + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime = 0); virtual void doneEncoding(bool cullSmallChanges); /// \return true if an error should be logged @@ -464,10 +464,11 @@ public: void setTargetScaleVerbose(float targetScale); float getDomainLimitedScale() const { return glm::clamp(_targetScale, _domainMinimumScale, _domainMaximumScale); } + void setDomainMinimumScale(float domainMinimumScale) - { _domainMinimumScale = glm::clamp(domainMinimumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); } - void setDomainMaximumScale(float domainMaximumScale) - { _domainMaximumScale = glm::clamp(domainMaximumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); } + { _domainMinimumScale = glm::clamp(domainMinimumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); _scaleChanged = usecTimestampNow(); } + void setDomainMaximumScale(float domainMaximumScale) + { _domainMaximumScale = glm::clamp(domainMaximumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); _scaleChanged = usecTimestampNow(); } // Hand State Q_INVOKABLE void setHandState(char s) { _handState = s; } @@ -602,23 +603,23 @@ public slots: float getTargetScale() { return _targetScale; 
} + void resetLastSent() { _lastToByteArray = 0; } + protected: void lazyInitHeadData(); - bool avatarLocalPositionChanged(); - bool avatarDimensionsChanged(); - bool avatarOrientationChanged(); - bool avatarScaleChanged(); - bool lookAtPositionChanged(); - bool audioLoudnessChanged(); - bool sensorToWorldMatrixChanged(); - bool additionalFlagsChanged(); + bool avatarDimensionsChangedSince(quint64 time); + bool avatarScaleChangedSince(quint64 time); + bool lookAtPositionChangedSince(quint64 time); + bool audioLoudnessChangedSince(quint64 time); + bool sensorToWorldMatrixChangedSince(quint64 time); + bool additionalFlagsChangedSince(quint64 time); bool hasParent() { return !getParentID().isNull(); } - bool parentInfoChanged(); + bool parentInfoChangedSince(quint64 time); bool hasFaceTracker() { return _headData ? _headData->_isFaceTrackerConnected : false; } - bool faceTrackerInfoChanged(); + bool faceTrackerInfoChangedSince(quint64 time); glm::vec3 _handPosition; virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; } @@ -681,17 +682,17 @@ protected: // updates about one avatar to another. glm::vec3 _globalPosition { 0, 0, 0 }; - glm::vec3 _lastSentGlobalPosition { 0, 0, 0 }; - glm::vec3 _lastSentLocalPosition { 0, 0, 0 }; - glm::vec3 _lastSentAvatarDimensions { 0, 0, 0 }; - glm::quat _lastSentLocalOrientation; - float _lastSentScale { 0 }; - glm::vec3 _lastSentLookAt { 0, 0, 0 }; - float _lastSentAudioLoudness { 0 }; - glm::mat4 _lastSentSensorToWorldMatrix; - uint8_t _lastSentAdditionalFlags { 0 }; - QUuid _lastSentParentID; - quint16 _lastSentParentJointIndex { 0 }; + + quint64 _globalPositionChanged { 0 }; + quint64 _avatarDimensionsChanged { 0 }; + quint64 _avatarScaleChanged { 0 }; + quint64 _lookAtChanged { 0 }; + quint64 _audioLoudnessChanged { 0 }; + quint64 _sensorToWorldMatrixChanged { 0 }; + quint64 _additionalFlagsChanged { 0 }; + quint64 _parentChanged { 0 }; + + quint64 _lastToByteArray { 0 }; // tracks the last time we did a toByteArray glm::vec3 _globalBoundingBoxCorner; @@ -710,7 +711,6 @@ protected: int getFauxJointIndex(const QString& name) const; AvatarDataPacket::AvatarInfo _lastAvatarInfo; - glm::mat4 _lastSensorToWorldMatrix; private: friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar); diff --git a/libraries/avatars/src/HeadData.h b/libraries/avatars/src/HeadData.h index af657339ba..cbf6c6bb32 100644 --- a/libraries/avatars/src/HeadData.h +++ b/libraries/avatars/src/HeadData.h @@ -19,6 +19,8 @@ #include <glm/glm.hpp> #include <glm/gtc/quaternion.hpp> +#include <SharedUtil.h> + // degrees const float MIN_HEAD_YAW = -180.0f; const float MAX_HEAD_YAW = 180.0f; @@ -56,7 +58,13 @@ public: void setOrientation(const glm::quat& orientation); float getAudioLoudness() const { return _audioLoudness; } - void setAudioLoudness(float audioLoudness) { _audioLoudness = audioLoudness; } + void setAudioLoudness(float audioLoudness) { + if (audioLoudness != _audioLoudness) { + _audioLoudnessChanged = usecTimestampNow(); + } + _audioLoudness = audioLoudness; + } + bool audioLoudnessChangedSince(quint64 time) { return _audioLoudnessChanged >= time; } float getAudioAverageLoudness() const { return _audioAverageLoudness; } void setAudioAverageLoudness(float audioAverageLoudness) { _audioAverageLoudness = audioAverageLoudness; } @@ -66,7 +74,13 @@ public: void setBlendshapeCoefficients(const QVector<float>& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; } const glm::vec3& 
getLookAtPosition() const { return _lookAtPosition; } - void setLookAtPosition(const glm::vec3& lookAtPosition) { _lookAtPosition = lookAtPosition; } + void setLookAtPosition(const glm::vec3& lookAtPosition) { + if (_lookAtPosition != lookAtPosition) { + _lookAtPositionChanged = usecTimestampNow(); + } + _lookAtPosition = lookAtPosition; + } + bool lookAtPositionChangedSince(quint64 time) { return _lookAtPositionChanged >= time; } friend class AvatarData; @@ -80,7 +94,11 @@ protected: float _baseRoll; glm::vec3 _lookAtPosition; + quint64 _lookAtPositionChanged { 0 }; + float _audioLoudness; + quint64 _audioLoudnessChanged { 0 }; + bool _isFaceTrackerConnected; bool _isEyeTrackerConnected; float _leftEyeBlink; diff --git a/libraries/shared/src/SpatiallyNestable.cpp b/libraries/shared/src/SpatiallyNestable.cpp index cbe982b959..5045f74b21 100644 --- a/libraries/shared/src/SpatiallyNestable.cpp +++ b/libraries/shared/src/SpatiallyNestable.cpp @@ -26,6 +26,9 @@ SpatiallyNestable::SpatiallyNestable(NestableType nestableType, QUuid id) : // set flags in _transform _transform.setTranslation(glm::vec3(0.0f)); _transform.setRotation(glm::quat()); + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } SpatiallyNestable::~SpatiallyNestable() { @@ -403,6 +406,7 @@ void SpatiallyNestable::setPosition(const glm::vec3& position, bool& success, bo }); if (success && changed) { locationChanged(tellPhysics); + _translationChanged = usecTimestampNow(); } } @@ -455,6 +459,7 @@ void SpatiallyNestable::setOrientation(const glm::quat& orientation, bool& succe }); if (success && changed) { locationChanged(tellPhysics); + _rotationChanged = usecTimestampNow(); } } @@ -653,6 +658,8 @@ void SpatiallyNestable::setTransform(const Transform& transform, bool& success) }); if (success && changed) { locationChanged(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } } @@ -693,6 +700,7 @@ void SpatiallyNestable::setScale(const glm::vec3& scale) { }); if (changed) { dimensionsChanged(); + _scaleChanged = usecTimestampNow(); } } @@ -715,6 +723,7 @@ void SpatiallyNestable::setScale(float value) { if (changed) { dimensionsChanged(); + _scaleChanged = usecTimestampNow(); } } @@ -743,6 +752,9 @@ void SpatiallyNestable::setLocalTransform(const Transform& transform) { if (changed) { locationChanged(); + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } } @@ -769,6 +781,7 @@ void SpatiallyNestable::setLocalPosition(const glm::vec3& position, bool tellPhy }); if (changed) { locationChanged(tellPhysics); + _translationChanged = usecTimestampNow(); } } @@ -795,6 +808,7 @@ void SpatiallyNestable::setLocalOrientation(const glm::quat& orientation) { }); if (changed) { locationChanged(); + _rotationChanged = usecTimestampNow(); } } @@ -850,7 +864,10 @@ void SpatiallyNestable::setLocalScale(const glm::vec3& scale) { changed = true; } }); - dimensionsChanged(); + if (changed) { + dimensionsChanged(); + _scaleChanged = usecTimestampNow(); + } } QList<SpatiallyNestablePointer> SpatiallyNestable::getChildren() const { @@ -1072,6 +1089,9 @@ void SpatiallyNestable::setLocalTransformAndVelocities( if (changed) { locationChanged(false); + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } } diff --git a/libraries/shared/src/SpatiallyNestable.h b/libraries/shared/src/SpatiallyNestable.h index 
f58e2c906c..3f3de89fce 100644 --- a/libraries/shared/src/SpatiallyNestable.h +++ b/libraries/shared/src/SpatiallyNestable.h @@ -176,6 +176,10 @@ public: const glm::vec3& localVelocity, const glm::vec3& localAngularVelocity); + bool scaleChangedSince(quint64 time) { return _scaleChanged > time; } + bool tranlationChangedSince(quint64 time) { return _translationChanged > time; } + bool rotationChangedSince(quint64 time) { return _rotationChanged > time; } + protected: const NestableType _nestableType; // EntityItem or an AvatarData QUuid _id; @@ -199,6 +203,9 @@ protected: mutable bool _queryAACubeSet { false }; bool _missingAncestor { false }; + quint64 _scaleChanged { 0 }; + quint64 _translationChanged { 0 }; + quint64 _rotationChanged { 0 }; private: mutable ReadWriteLockable _transformLock; From c7a5e873dfb09aa8848da06dd4a47820ad907c91 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Tue, 27 Dec 2016 18:34:47 -0800 Subject: [PATCH 09/43] debug by sending all --- libraries/avatars/src/AvatarData.cpp | 50 ++++++++++++++++------------ 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 450c53a329..708c033c33 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -205,6 +205,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); + sendAll = true; + // TODO: DRY this up to a shared method // that can pack any type given the number of bytes // and return the number of bytes to push the pointer @@ -282,7 +284,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; - //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; + qDebug() << __FUNCTION__ << "sendAll:" << sendAll; //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; //qDebug() << "hasAvatarOrientation:" << hasAvatarOrientation; @@ -302,11 +304,14 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "lastSentTime:" << lastSentTime; + + /* qDebug() << "..." << "tranlationChangedSince():" << tranlationChangedSince(lastSentTime); qDebug() << "..." << "rotationChangedSince():" << rotationChangedSince(lastSentTime); qDebug() << "..." << "lookAtPositionChangedSince():" << lookAtPositionChangedSince(lastSentTime); qDebug() << "..." << "audioLoudnessChangedSince():" << audioLoudnessChangedSince(lastSentTime); qDebug() << "..." 
<< "parentInfoChangedSince():" << parentInfoChangedSince(lastSentTime); + */ memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); @@ -331,6 +336,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->localPosition[1] = localPosition.y; data->localPosition[2] = localPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + //qDebug() << "hasAvatarLocalPosition localPosition:" << localPosition; } if (hasAvatarDimensions) { @@ -342,7 +348,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->avatarDimensions[1] = avatarDimensions.y; data->avatarDimensions[2] = avatarDimensions.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - qDebug() << "hasAvatarDimensions avatarDimensions:" << avatarDimensions; + //qDebug() << "hasAvatarDimensions avatarDimensions:" << avatarDimensions; } if (hasAvatarOrientation) { @@ -353,7 +359,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - qDebug() << "hasAvatarOrientation bodyEulerAngles:" << bodyEulerAngles; + //qDebug() << "hasAvatarOrientation bodyEulerAngles:" << bodyEulerAngles; } if (hasAvatarScale) { @@ -361,7 +367,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent auto scale = getDomainLimitedScale(); packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale); destinationBuffer += sizeof(AvatarDataPacket::AvatarScale); - qDebug() << "hasAvatarScale scale:" << scale; + //qDebug() << "hasAvatarScale scale:" << scale; } if (hasLookAtPosition) { @@ -371,7 +377,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->lookAtPosition[1] = lookAt.y; data->lookAtPosition[2] = lookAt.z; destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); - qDebug() << "hasLookAtPosition lookAt:" << lookAt; + //qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -379,7 +385,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS); packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); - qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; + //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -392,7 +398,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - qDebug() << "hasSensorToWorldMatrix..."; + //qDebug() << "hasSensorToWorldMatrix..."; } QUuid parentID = getParentID(); @@ -425,7 +431,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->flags = flags; destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); - //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; + ////qDebug() << "hasAdditionalFlags _keyState:" << _keyState; //qDebug() << "hasAdditionalFlags _handState:" << _handState; 
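// Editor's sketch: the idea behind the radix-based fixed-point packing used
// above for audioLoudness and the sensor-to-world scale
// (packFloatScalarToSignedTwoByteFixed and its unpack counterpart). This is an
// illustration of the concept, not the actual shared-library implementation:
// the float is scaled by 2^radix and stored in a signed 16-bit integer,
// trading range for resolution (radix 2 gives ~0.25 steps over roughly +/-8191).

#include <cstdint>
#include <cstring>

void packFixedPoint(uint8_t* dest, float value, int radix) {
    int16_t fixed = static_cast<int16_t>(value * static_cast<float>(1 << radix));
    std::memcpy(dest, &fixed, sizeof(fixed)); // 2 bytes on the wire
}

float unpackFixedPoint(const uint8_t* src, int radix) {
    int16_t fixed;
    std::memcpy(&fixed, src, sizeof(fixed));
    return static_cast<float>(fixed) / static_cast<float>(1 << radix);
}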
//qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; @@ -438,6 +444,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); parentInfo->parentJointIndex = _parentJointIndex; destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); + //qDebug() << "hasParentInfo ...:"; } // If it is connected, pack up the data @@ -454,6 +461,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // followed by a variable number of float coefficients memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); + //qDebug() << "hasFaceTrackerInfo ...:"; } // If it is connected, pack up the data @@ -468,7 +476,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent int numJoints = _jointData.size(); *destinationBuffer++ = (uint8_t)numJoints; - //qDebug() << "hasJointData numJoints:" << numJoints; + qDebug() << "hasJointData numJoints:" << numJoints; unsigned char* validityPosition = destinationBuffer; unsigned char validity = 0; @@ -709,7 +717,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); _globalPositionChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); - qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; + //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } if (hasAvatarLocalPosition) { @@ -724,7 +732,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setLocalPosition(position); sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - qDebug() << "hasAvatarLocalPosition position:" << position; + //qDebug() << "hasAvatarLocalPosition position:" << position; } if (hasAvatarDimensions) { @@ -735,7 +743,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); _avatarDimensionsChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; + //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; } if (hasAvatarOrientation) { @@ -760,7 +768,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { setLocalOrientation(newOrientation); } sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; + //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; } if (hasAvatarScale) { @@ -776,7 +784,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setTargetScale(scale); sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); - qDebug() << "hasAvatarOrientation scale:" << scale; + //qDebug() << "hasAvatarOrientation scale:" << scale; } if (hasLookAtPosition) { @@ -791,7 +799,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } _headData->setLookAtPosition(lookAt); sourceBuffer += 
sizeof(AvatarDataPacket::LookAtPosition); - qDebug() << "hasLookAtPosition lookAt:" << lookAt; + //qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -808,7 +816,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } _headData->setAudioLoudness(audioLoudness); sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); - qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; + //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -823,7 +831,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _sensorToWorldMatrixCache.set(sensorToWorldMatrix); _sensorToWorldMatrixChanged = usecTimestampNow(); sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; + //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; } if (hasAdditionalFlags) { @@ -853,7 +861,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; - qDebug() << "hasAdditionalFlags bitItems:" << bitItems; + //qDebug() << "hasAdditionalFlags bitItems:" << bitItems; sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); _additionalFlagsChanged = usecTimestampNow(); @@ -869,7 +877,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); _parentID = QUuid::fromRfc4122(byteArray); _parentJointIndex = parentInfo->parentJointIndex; - qDebug() << "hasParentInfo _parentID:" << _parentID; + //qDebug() << "hasParentInfo _parentID:" << _parentID; _parentChanged = usecTimestampNow(); } else { @@ -893,13 +901,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
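// Editor's sketch: the parse path above repeats one pattern per optional
// section -- bounds-check the remaining bytes (PACKET_READ_CHECK, whose
// definition is not shown in this excerpt), reinterpret_cast the packed
// struct, then advance sourceBuffer. An assumed, simplified equivalent for
// illustration only:

#include <cstddef>
#include <cstdint>

template <typename PackedT>
const PackedT* readSection(const uint8_t*& cursor, const uint8_t* end) {
    if (static_cast<size_t>(end - cursor) < sizeof(PackedT)) {
        return nullptr;                       // caller stops parsing, as PACKET_READ_CHECK does
    }
    auto section = reinterpret_cast<const PackedT*>(cursor);
    cursor += sizeof(PackedT);                // step past the fixed-size section
    return section;
}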
memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); sourceBuffer += coefficientsSize; - qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; + //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; } if (hasJointData) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; - qDebug() << "hasJointData numJoints:" << numJoints; + qDebug() << "....hasJointData numJoints:" << numJoints; const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); From 99f180f945638d424564f9779eb154fe6126c3e3 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Thu, 29 Dec 2016 08:16:19 -0800 Subject: [PATCH 10/43] more hacking --- libraries/avatars/src/AvatarData.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 708c033c33..06456ae848 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -284,7 +284,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; - qDebug() << __FUNCTION__ << "sendAll:" << sendAll; + //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; //qDebug() << "hasAvatarOrientation:" << hasAvatarOrientation; @@ -303,7 +303,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); - qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "lastSentTime:" << lastSentTime; + //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "lastSentTime:" << lastSentTime; /* qDebug() << "..." 
<< "tranlationChangedSince():" << tranlationChangedSince(lastSentTime); @@ -688,7 +688,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); sourceBuffer += sizeof(packetStateFlags); - qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; + //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; //qDebug() << "buffer size:" << buffer.size(); @@ -907,7 +907,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (hasJointData) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; - qDebug() << "....hasJointData numJoints:" << numJoints; + //qDebug() << "....hasJointData numJoints:" << numJoints; const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); From 5acae04420dec1c2b1d36356ec584a2fb43ffe4a Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Wed, 4 Jan 2017 19:40:03 -0800 Subject: [PATCH 11/43] fix some warnings --- libraries/avatars/src/AvatarData.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 06456ae848..b5430ea808 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -52,7 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const int AUDIO_LOUDNESS_RADIX = 2; -static const int MODEL_OFFSET_RADIX = 6; +//static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -215,7 +215,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); unsigned char* startPosition = destinationBuffer; - unsigned char* packetStateFlagsAt = startPosition; + //unsigned char* packetStateFlagsAt = startPosition; // psuedo code.... 
// - determine which sections will be included @@ -607,7 +607,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent int avatarDataSize = destinationBuffer - startPosition; //qDebug() << "avatarDataSize:" << avatarDataSize; - return avatarDataByteArray.left(destinationBuffer - startPosition); + return avatarDataByteArray.left(avatarDataSize); } void AvatarData::doneEncoding(bool cullSmallChanges) { From 251495978c20dd3debb547a4c6a180a0d62f06ce Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Thu, 5 Jan 2017 19:46:19 -0800 Subject: [PATCH 12/43] more work on delta sending --- assignment-client/src/avatars/AvatarMixer.cpp | 6 +- .../src/avatars/AvatarMixerClientData.cpp | 1 + libraries/avatars/src/AvatarData.cpp | 96 ++++++++++++------- libraries/avatars/src/AvatarData.h | 7 +- 4 files changed, 71 insertions(+), 39 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 3b401934fc..7d9cc7c5b7 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -424,8 +424,10 @@ void AvatarMixer::broadcastAvatarData() { //qDebug() << "about to write data for:" << otherNode->getUUID(); quint64 lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); - qDebug() << "about to write data for:" << otherNode->getUUID() << "last encoded at:" << lastEncodeForOther; - numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail, lastEncodeForOther)); + //qDebug() << "about to call toByteArray() for:" << otherNode->getUUID() << "last encoded at:" << lastEncodeForOther; + auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther); + //qDebug() << "about to call avatarPacketList->write() for:" << otherNode->getUUID() << " bytes:" << bytes.size(); + numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); }); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.cpp b/assignment-client/src/avatars/AvatarMixerClientData.cpp index c65703b8e6..5732f63eb6 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.cpp +++ b/assignment-client/src/avatars/AvatarMixerClientData.cpp @@ -21,6 +21,7 @@ int AvatarMixerClientData::parseData(ReceivedMessage& message) { message.readPrimitive(&_lastReceivedSequenceNumber); // compute the offset to the data payload + //qDebug() << __FUNCTION__ "about to call parseDataFromBuffer() for:" << getNodeID(); return _avatar->parseDataFromBuffer(message.readWithoutCopy(message.getBytesLeftToRead())); } diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index b5430ea808..b05a733874 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -131,8 +131,11 @@ float AvatarData::getTargetScale() const { } void AvatarData::setTargetScale(float targetScale) { - _targetScale = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); - _scaleChanged = usecTimestampNow(); + auto newValue = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); + if (_targetScale != newValue) { + _targetScale = newValue; + _scaleChanged = usecTimestampNow(); + } } void AvatarData::setTargetScaleVerbose(float targetScale) { @@ -181,7 +184,7 @@ bool AvatarData::sensorToWorldMatrixChangedSince(quint64 time) { } bool AvatarData::additionalFlagsChangedSince(quint64 time) { - return true; // FIXME! 
+ return _additionalFlagsChanged >= time; } bool AvatarData::parentInfoChangedSince(quint64 time) { @@ -201,11 +204,13 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent _lastToByteArray = usecTimestampNow(); } - bool cullSmallChanges = (dataDetail == CullSmallData); + // FIXME - the other "delta" sending seems to work ok, but this culling small data seems to cause + // problems in the sending of joint data... hand waving is awkward + bool cullSmallChanges = false; // (dataDetail == CullSmallData); bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); - sendAll = true; + //sendAll = true; // FIXME -- hack-o-rama // TODO: DRY this up to a shared method // that can pack any type given the number of bytes @@ -284,7 +289,13 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; - //qDebug() << __FUNCTION__ << "sendAll:" << sendAll; + /* + qDebug() << __FUNCTION__ << "sendAll:" << sendAll + << "sendMinimum:" << sendMinimum + << "hasJointData:" << hasJointData + << "cullSmallChanges:" << cullSmallChanges; + */ + //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; //qDebug() << "hasAvatarOrientation:" << hasAvatarOrientation; @@ -431,7 +442,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->flags = flags; destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); - ////qDebug() << "hasAdditionalFlags _keyState:" << _keyState; + //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; //qDebug() << "hasAdditionalFlags _handState:" << _handState; //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; @@ -476,7 +487,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent int numJoints = _jointData.size(); *destinationBuffer++ = (uint8_t)numJoints; - qDebug() << "hasJointData numJoints:" << numJoints; + //qDebug() << "hasJointData numJoints:" << numJoints; unsigned char* validityPosition = destinationBuffer; unsigned char validity = 0; @@ -688,10 +699,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); sourceBuffer += sizeof(packetStateFlags); - //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags; - //qDebug() << "buffer size:" << buffer.size(); - - + //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "buffer size:" << buffer.size(); #define HAS_FLAG(B,F) ((B & F) == F) @@ -714,8 +722,11 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (hasAvatarGlobalPosition) { PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer); - _globalPosition = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); - _globalPositionChanged = usecTimestampNow(); + auto newValue = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); + if (_globalPosition != newValue) { + _globalPosition = newValue; + _globalPositionChanged = usecTimestampNow(); + } sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); //qDebug() << 
"hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } @@ -738,10 +749,12 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (hasAvatarDimensions) { PACKET_READ_CHECK(AvatarDimensions, sizeof(AvatarDataPacket::AvatarDimensions)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarDimensions*>(sourceBuffer); - + auto newValue = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); // FIXME - this is suspicious looking! - _globalBoundingBoxCorner = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); - _avatarDimensionsChanged = usecTimestampNow(); + if (_globalBoundingBoxCorner != newValue) { + _globalBoundingBoxCorner = newValue; + _avatarDimensionsChanged = usecTimestampNow(); + } sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; } @@ -828,19 +841,22 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX); glm::vec3 sensorToWorldTrans(data->sensorToWorldTrans[0], data->sensorToWorldTrans[1], data->sensorToWorldTrans[2]); glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); - _sensorToWorldMatrixCache.set(sensorToWorldMatrix); - _sensorToWorldMatrixChanged = usecTimestampNow(); + if (_sensorToWorldMatrixCache.get() != sensorToWorldMatrix) { + _sensorToWorldMatrixCache.set(sensorToWorldMatrix); + _sensorToWorldMatrixChanged = usecTimestampNow(); + } sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; } if (hasAdditionalFlags) { + //qDebug() << "hasAdditionalFlags..."; PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags)); auto data = reinterpret_cast<const AvatarDataPacket::AdditionalFlags*>(sourceBuffer); uint8_t bitItems = data->flags; // key state, stored as a semi-nibble in the bitItems - _keyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); + auto newKeyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); // hand state, stored as a semi-nibble plus a bit in the bitItems // we store the hand state as well as other items in a shared bitset. The hand state is an octal, but is split @@ -849,22 +865,28 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { // |x,x|H0,H1|x,x,x|H2| // +---+-----+-----+--+ // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits - _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + auto newHandState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? 
IS_FINGER_POINTING_FLAG : 0); + auto newFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); + auto newEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); - _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); - _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); + bool keyStateChanged = (_keyState != newKeyState); + bool handStateChanged = (_handState != newHandState); + bool faceStateChanged = (_headData->_isFaceTrackerConnected != newFaceTrackerConnected); + bool eyeStateChanged = (_headData->_isEyeTrackerConnected != newEyeTrackerConnected); + bool somethingChanged = keyStateChanged || handStateChanged || faceStateChanged || eyeStateChanged; - //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; - //qDebug() << "hasAdditionalFlags _handState:" << _handState; - //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; - //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; + _keyState = newKeyState; + _handState = newHandState; + _headData->_isFaceTrackerConnected = newFaceTrackerConnected; + _headData->_isEyeTrackerConnected = newEyeTrackerConnected; - //qDebug() << "hasAdditionalFlags bitItems:" << bitItems; sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); - _additionalFlagsChanged = usecTimestampNow(); + if (somethingChanged) { + _additionalFlagsChanged = usecTimestampNow(); + } } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... @@ -875,10 +897,14 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); - _parentID = QUuid::fromRfc4122(byteArray); - _parentJointIndex = parentInfo->parentJointIndex; - //qDebug() << "hasParentInfo _parentID:" << _parentID; - _parentChanged = usecTimestampNow(); + + auto newParentID = QUuid::fromRfc4122(byteArray); + + if ((_parentID != newParentID) || (_parentJointIndex = parentInfo->parentJointIndex)) { + _parentID = newParentID; + _parentJointIndex = parentInfo->parentJointIndex; + _parentChanged = usecTimestampNow(); + } } else { // FIXME - this aint totally right, for switching to parent/no-parent @@ -907,7 +933,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (hasJointData) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; - //qDebug() << "....hasJointData numJoints:" << numJoints; + //qDebug() << __FUNCTION__ << "....hasJointData numJoints:" << numJoints; const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); @@ -981,7 +1007,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } } -#ifdef WANT_DEBUG +#if 0 //def WANT_DEBUG if (numValidJointRotations > 15) { qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations << "translations:" << numValidJointTranslations diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 3ebe196ce0..e1f39006f3 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -116,6 +116,11 @@ const char AVATARDATA_FLAGS_MINIMUM = 0; using smallFloat = uint16_t; // a compressed float with less precision, user defined radix namespace AvatarDataPacket { + + // NOTE: every time AvatarData is sent from mixer to client, it also 
includes the GUIID for the session + // this is 16bytes of data at 45hz that's 5.76kbps + // it might be nice to use a dictionary to compress that + // Packet State Flags - we store the details about the existence of other records in this bitset: // AvatarGlobalPosition, Avatar Faceshift, eye tracking, and existence of using HasFlags = uint16_t; @@ -686,8 +691,6 @@ protected: quint64 _globalPositionChanged { 0 }; quint64 _avatarDimensionsChanged { 0 }; quint64 _avatarScaleChanged { 0 }; - quint64 _lookAtChanged { 0 }; - quint64 _audioLoudnessChanged { 0 }; quint64 _sensorToWorldMatrixChanged { 0 }; quint64 _additionalFlagsChanged { 0 }; quint64 _parentChanged { 0 }; From 785582057e70f167c33ee568904f2851abca1430 Mon Sep 17 00:00:00 2001 From: Brad Hefta-Gaub <brad@highfidelity.io> Date: Sun, 8 Jan 2017 20:35:52 -0800 Subject: [PATCH 13/43] more comments --- libraries/avatars/src/AvatarData.cpp | 43 +++++++++++----------------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index b05a733874..e5587fca57 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -220,7 +220,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); unsigned char* startPosition = destinationBuffer; - //unsigned char* packetStateFlagsAt = startPosition; // psuedo code.... // - determine which sections will be included @@ -228,6 +227,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // - include each section in order // FIXME - things to consider + // + // - cullSmallChanges is broken... needs to be repaired... <<<<<<<<<<<<<<< top issue + // // - how to dry up this code? // // - the sections below are basically little repeats of each other, where they @@ -239,41 +241,30 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // - also, we could determine the "hasXXX" flags in the little sections, // and then set the actual flag values AFTER the rest are done... // - // - this toByteArray() side-effects the AvatarData, is that safe? in particular - // is it possible we'll call toByteArray() and then NOT actually use the result? + // FIXME - + // + // BUG -- if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... + // this is an iFrame issue... what to do about that? + // + // // TODO - // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp // - // 1) make the dimensions really be dimensions instead of corner - 12bytes - 4.3kbps - // 2) determine if local position really only matters for parent - 12bytes - 4.3kbps - // 3) AdditionalFlags - only send if changed - 1byte - 0.36 kpbs - // 4) SensorToWorld - should we only send this for avatars with attachments?? - 20bytes - 7.2kbps + // 1) make the dimensions really be dimensions instead of corner - 12 bytes - 4.32 kbps (when moving) + // 2) determine if local position really only matters for parent - 12 bytes - 4.32 kbps (when moving and/or not parented) + // 3) SensorToWorld - should we only send this for avatars with attachments?? 
- 20 bytes - 7.20 kbps + // 4) AudioLoudness - use Ken's 8bit encoding - 1 byte - 0.36 kpbs (when speaking) + // 5) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps // - // ----- Subtotal -- non-joint savings --- 16.2kbps --- ~12% savings? + // ----- Subtotal -- non-joint savings --- ~21.2 kbps --- ~12.8% savings? // + // 5) Joints... use more aggressive quantization and/or culling for more distance between avatars + // // Joints -- // 63 rotations * 6 bytes = 136kbps // 3 translations * 6 bytes = 6.48kbps // - // FIXME - // - if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... - // this is an iFrame issue... what to do about that? - // - // - probably - if the avatar was out of view, then came in view, it would also not correctly do an iFrame - // - // - in the AvatarMixer, there's a single AvatarData per connected avatar, that means that this - // "last sent" strategy, actually won't work, because the serialization of the byte array will - // iterate through a bunch of avatars in a loop, the first one will get the full data, then - // the others will be partial. - // we need some way of keeping track of what was sent the last time. - - // AvatarDataRegulator - // .lastSent = time - // - // hasAvatarGlobalPosition = (globalPositionChanged > lastSent) - // hasAvatarLocalPosition = (localPositionChanged > lastSent) - // ... bool hasAvatarGlobalPosition = true; // always include global position bool hasAvatarLocalPosition = sendAll || tranlationChangedSince(lastSentTime); From 13198fd949560710cbb2ff0d156e02c2c5b20e7e Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 9 Jan 2017 13:57:59 -0800 Subject: [PATCH 14/43] add datarate --- interface/src/avatar/AvatarManager.cpp | 5 ++ interface/src/avatar/AvatarManager.h | 1 + libraries/avatars/src/AvatarData.cpp | 59 +++++++++++++++++-- libraries/avatars/src/AvatarData.h | 81 ++++++-------------------- 4 files changed, 79 insertions(+), 67 deletions(-) diff --git a/interface/src/avatar/AvatarManager.cpp b/interface/src/avatar/AvatarManager.cpp index 1f5726acba..58028d05a3 100644 --- a/interface/src/avatar/AvatarManager.cpp +++ b/interface/src/avatar/AvatarManager.cpp @@ -132,6 +132,11 @@ void AvatarManager::updateMyAvatar(float deltaTime) { Q_LOGGING_CATEGORY(trace_simulation_avatar, "trace.simulation.avatar"); +float AvatarManager::getAvatarDataRate(const QUuid& sessionID, const QString& rateName) { + auto avatar = getAvatarBySessionID(sessionID); + return avatar->getDataRate(rateName); +} + void AvatarManager::updateOtherAvatars(float deltaTime) { // lock the hash for read to check the size QReadLocker lock(&_hashLock); diff --git a/interface/src/avatar/AvatarManager.h b/interface/src/avatar/AvatarManager.h index a423e34f8f..193fa35ec5 100644 --- a/interface/src/avatar/AvatarManager.h +++ b/interface/src/avatar/AvatarManager.h @@ -71,6 +71,7 @@ public: void addAvatarToSimulation(Avatar* avatar); + Q_INVOKABLE float getAvatarDataRate(const QUuid& sessionID, const QString& rateName = QString("")); Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersection(const PickRay& ray, const QScriptValue& avatarIdsToInclude = QScriptValue(), const QScriptValue& avatarIdsToDiscard = QScriptValue()); diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index e5587fca57..ec725609da 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -89,10 +89,6 @@ AvatarData::AvatarData() 
: ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE); ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE); ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE); - - // Old format... - ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE); - } AvatarData::~AvatarData() { @@ -682,6 +678,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { AvatarDataPacket::HasFlags packetStateFlags; + _parseBufferRate.increment(); + const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data()); const unsigned char* endPosition = startPosition + buffer.size(); const unsigned char* sourceBuffer = startPosition; @@ -720,6 +718,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; + + _globalPositionRate.increment(); } if (hasAvatarLocalPosition) { @@ -735,6 +735,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { setLocalPosition(position); sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); //qDebug() << "hasAvatarLocalPosition position:" << position; + + _localPositionRate.increment(); } if (hasAvatarDimensions) { @@ -748,6 +750,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; + + _avatarDimensionRate.increment(); } if (hasAvatarOrientation) { @@ -773,6 +777,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; + + _avatarOrientationRate.increment(); } if (hasAvatarScale) { @@ -789,6 +795,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { setTargetScale(scale); sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); //qDebug() << "hasAvatarOrientation scale:" << scale; + + _avatarScaleRate.increment(); } if (hasLookAtPosition) { @@ -804,6 +812,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _headData->setLookAtPosition(lookAt); sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); //qDebug() << "hasLookAtPosition lookAt:" << lookAt; + + _lookAtPositionRate.increment(); } if (hasAudioLoudness) { @@ -821,6 +831,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _headData->setAudioLoudness(audioLoudness); sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; + + _audioLoudnessRate.increment(); } if (hasSensorToWorldMatrix) { @@ -838,6 +850,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; + + _sensorToWorldRate.increment(); } if (hasAdditionalFlags) { @@ -878,6 +892,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (somethingChanged) { _additionalFlagsChanged = usecTimestampNow(); } + _additionalFlagsRate.increment(); } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... 
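// Editor's sketch: what the per-section RateCounter increments above amount to.
// Only increment() and rate() are visible in this patch; the real
// shared/RateCounter.h may differ, so this is a simple time-windowed
// approximation for illustration.

#include <chrono>
#include <cstddef>

class SimpleRateCounter {
public:
    void increment(size_t count = 1) {
        rollWindowIfNeeded();
        _current += count;
    }
    float rate() const { return _lastRate; }  // events (or bytes) per second

private:
    void rollWindowIfNeeded() {
        using namespace std::chrono;
        auto now = steady_clock::now();
        float elapsed = duration<float>(now - _windowStart).count();
        if (elapsed >= 1.0f) {                // fold the last full window into the reported rate
            _lastRate = static_cast<float>(_current) / elapsed;
            _current = 0;
            _windowStart = now;
        }
    }

    std::chrono::steady_clock::time_point _windowStart { std::chrono::steady_clock::now() };
    size_t _current { 0 };
    float _lastRate { 0.0f };
};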
@@ -897,6 +912,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _parentChanged = usecTimestampNow(); } + _parentInfoRate.increment(); } else { // FIXME - this aint totally right, for switching to parent/no-parent _parentID = QUuid(); @@ -919,6 +935,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); sourceBuffer += coefficientsSize; //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; + + _faceTrackerRate.increment(); } if (hasJointData) { @@ -1011,6 +1029,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations; + _jointDataRate.increment(); } int numBytesRead = sourceBuffer - startPosition; @@ -1018,6 +1037,38 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return numBytesRead; } +float AvatarData::getDataRate(const QString& rateName) { + if (rateName == "") { + return _parseBufferRate.rate(); + } else if (rateName == "globalPosition") { + return _globalPositionRate.rate(); + } else if (rateName == "localPosition") { + return _localPositionRate.rate(); + } else if (rateName == "avatarDimensions") { + return _avatarDimensionRate.rate(); + } else if (rateName == "avatarOrientation") { + return _avatarOrientationRate.rate(); + } else if (rateName == "avatarScale") { + return _avatarScaleRate.rate(); + } else if (rateName == "lookAtPosition") { + return _lookAtPositionRate.rate(); + } else if (rateName == "audioLoudness") { + return _audioLoudnessRate.rate(); + } else if (rateName == "sensorToWorkMatrix") { + return _sensorToWorldRate.rate(); + } else if (rateName == "additionalFlags") { + return _additionalFlagsRate.rate(); + } else if (rateName == "parentInfo") { + return _parentInfoRate.rate(); + } else if (rateName == "faceTracker") { + return _faceTrackerRate.rate(); + } else if (rateName == "jointData") { + return _jointDataRate.rate(); + } + return 0.0f; +} + + int AvatarData::getAverageBytesReceivedPerSecond() const { return lrint(_averageBytesReceived.getAverageSampleValuePerSecond()); } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index e1f39006f3..bb6135dc7e 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -56,6 +56,7 @@ typedef unsigned long long quint64; #include <Packed.h> #include <ThreadSafeValueCache.h> #include <SharedUtil.h> +#include <shared/RateCounter.h> #include "AABox.h" #include "HeadData.h" @@ -250,66 +251,6 @@ namespace AvatarDataPacket { SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() }; */ - - // OLD FORMAT.... - PACKED_BEGIN struct AvatarInfo { - // FIXME - this has 8 unqiue items, we could use a simple header byte to indicate whether or not the fields - // exist in the packet and have changed since last being sent. - float globalPosition[3]; // avatar's position - // FIXME - possible savings: - // a) could be encoded as relative to last known position, most movements - // will be withing a smaller radix - // b) would still need an intermittent absolute value. - - float position[3]; // skeletal model's position - // FIXME - this used to account for a registration offset from the avatar's position - // to the position of the skeletal model/mesh. 
This relative offset doesn't change from - // frame to frame, instead only changes when the model changes, it could be moved to the - // identity packet and/or only included when it changes. - // if it's encoded relative to the globalPosition, it could be reduced to a smaller radix - // - // POTENTIAL SAVINGS - 12 bytes - - float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box - // FIXME - this would change less frequently if it was the dimensions of the bounding box - // instead of the corner. - // - // POTENTIAL SAVINGS - 12 bytes - - uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to - uint16_t scale; // (compressed) 'ratio' encoding uses sign bit as flag. - // FIXME - this doesn't change every frame - // - // POTENTIAL SAVINGS - 2 bytes - - float lookAtPosition[3]; // world space position that eyes are focusing on. - // FIXME - unless the person has an eye tracker, this is simulated... - // a) maybe we can just have the client calculate this - // b) at distance this will be hard to discern and can likely be - // descimated or dropped completely - // - // POTENTIAL SAVINGS - 12 bytes - - uint16_t audioLoudness; // current loundess of microphone - // FIXME - - // a) this could probably be decimated with a smaller radix <<< DONE - // b) this doesn't change every frame - // - // POTENTIAL SAVINGS - 4-2 bytes - - // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments" - // we could save these bytes if no attachments are active. - // - // POTENTIAL SAVINGS - 20 bytes - - uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix - uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix - float sensorToWorldTrans[3]; // fourth column of sensor to world matrix - // FIXME - sensorToWorldTrans might be able to be better compressed if it was - // relative to the avatar position. 
- uint8_t flags; - } PACKED_END; - const size_t AVATAR_INFO_SIZE = 79; } static const float MAX_AVATAR_SCALE = 1000.0f; @@ -594,6 +535,8 @@ public: Q_INVOKABLE glm::mat4 getControllerLeftHandMatrix() const; Q_INVOKABLE glm::mat4 getControllerRightHandMatrix() const; + float getDataRate(const QString& rateName = QString("")); + public slots: void sendAvatarDataPacket(); void sendIdentityPacket(); @@ -696,7 +639,21 @@ protected: quint64 _parentChanged { 0 }; quint64 _lastToByteArray { 0 }; // tracks the last time we did a toByteArray - + + // Some rate data for incoming data + RateCounter<> _parseBufferRate; + RateCounter<> _globalPositionRate; + RateCounter<> _localPositionRate; + RateCounter<> _avatarDimensionRate; + RateCounter<> _avatarOrientationRate; + RateCounter<> _avatarScaleRate; + RateCounter<> _lookAtPositionRate; + RateCounter<> _audioLoudnessRate; + RateCounter<> _sensorToWorldRate; + RateCounter<> _additionalFlagsRate; + RateCounter<> _parentInfoRate; + RateCounter<> _faceTrackerRate; + RateCounter<> _jointDataRate; glm::vec3 _globalBoundingBoxCorner; @@ -713,8 +670,6 @@ protected: int getFauxJointIndex(const QString& name) const; - AvatarDataPacket::AvatarInfo _lastAvatarInfo; - private: friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar); static QUrl _defaultFullAvatarModelUrl; From eadb1758f9f3e8052f6ccfb9acf9c6d78e1c38b4 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 9 Jan 2017 17:07:47 -0800 Subject: [PATCH 15/43] adding some rate debugging --- libraries/avatars/src/AvatarData.cpp | 91 +++++++--- .../developer/debugging/debugAvatarMixer.js | 168 ++++++++++++++++++ 2 files changed, 231 insertions(+), 28 deletions(-) create mode 100644 scripts/developer/debugging/debugAvatarMixer.js diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index ec725609da..26a750f0c1 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -678,8 +678,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { AvatarDataPacket::HasFlags packetStateFlags; - _parseBufferRate.increment(); - const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data()); const unsigned char* endPosition = startPosition + buffer.size(); const unsigned char* sourceBuffer = startPosition; @@ -709,6 +707,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { quint64 now = usecTimestampNow(); if (hasAvatarGlobalPosition) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarGlobalPosition*>(sourceBuffer); auto newValue = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); @@ -719,10 +719,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; - _globalPositionRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _globalPositionRate.increment(numBytesRead); } if (hasAvatarLocalPosition) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarLocalPosition*>(sourceBuffer); glm::vec3 position = glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]); @@ 
-736,10 +739,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); //qDebug() << "hasAvatarLocalPosition position:" << position; - _localPositionRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _localPositionRate.increment(numBytesRead); } if (hasAvatarDimensions) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarDimensions, sizeof(AvatarDataPacket::AvatarDimensions)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarDimensions*>(sourceBuffer); auto newValue = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); @@ -751,10 +757,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; - _avatarDimensionRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _avatarDimensionRate.increment(numBytesRead); } if (hasAvatarOrientation) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarOrientation*>(sourceBuffer); float pitch, yaw, roll; @@ -778,10 +787,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; - _avatarOrientationRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _avatarOrientationRate.increment(numBytesRead); } if (hasAvatarScale) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarScale, sizeof(AvatarDataPacket::AvatarScale)); auto data = reinterpret_cast<const AvatarDataPacket::AvatarScale*>(sourceBuffer); float scale; @@ -796,10 +808,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); //qDebug() << "hasAvatarOrientation scale:" << scale; - _avatarScaleRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _avatarScaleRate.increment(numBytesRead); } if (hasLookAtPosition) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(LookAtPosition, sizeof(AvatarDataPacket::LookAtPosition)); auto data = reinterpret_cast<const AvatarDataPacket::LookAtPosition*>(sourceBuffer); glm::vec3 lookAt = glm::vec3(data->lookAtPosition[0], data->lookAtPosition[1], data->lookAtPosition[2]); @@ -813,10 +828,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); //qDebug() << "hasLookAtPosition lookAt:" << lookAt; - _lookAtPositionRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _lookAtPositionRate.increment(numBytesRead); } if (hasAudioLoudness) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); float audioLoudness; @@ -832,10 +850,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; - _audioLoudnessRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _audioLoudnessRate.increment(numBytesRead); } if (hasSensorToWorldMatrix) { + auto startSection = sourceBuffer; + 
PACKET_READ_CHECK(SensorToWorldMatrix, sizeof(AvatarDataPacket::SensorToWorldMatrix)); auto data = reinterpret_cast<const AvatarDataPacket::SensorToWorldMatrix*>(sourceBuffer); glm::quat sensorToWorldQuat; @@ -851,11 +872,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; - _sensorToWorldRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _sensorToWorldRate.increment(numBytesRead); } if (hasAdditionalFlags) { - //qDebug() << "hasAdditionalFlags..."; + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags)); auto data = reinterpret_cast<const AvatarDataPacket::AdditionalFlags*>(sourceBuffer); uint8_t bitItems = data->flags; @@ -892,12 +915,14 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (somethingChanged) { _additionalFlagsChanged = usecTimestampNow(); } - _additionalFlagsRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _additionalFlagsRate.increment(numBytesRead); } // FIXME -- make sure to handle the existance of a parent vs a change in the parent... //bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL); if (hasParentInfo) { + auto startSection = sourceBuffer; PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo)); auto parentInfo = reinterpret_cast<const AvatarDataPacket::ParentInfo*>(sourceBuffer); sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); @@ -912,13 +937,16 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _parentChanged = usecTimestampNow(); } - _parentInfoRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _parentInfoRate.increment(numBytesRead); } else { // FIXME - this aint totally right, for switching to parent/no-parent _parentID = QUuid(); } if (hasFaceTrackerInfo) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo)); auto faceTrackerInfo = reinterpret_cast<const AvatarDataPacket::FaceTrackerInfo*>(sourceBuffer); sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); @@ -936,10 +964,13 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += coefficientsSize; //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; - _faceTrackerRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _faceTrackerRate.increment(numBytesRead); } if (hasJointData) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; //qDebug() << __FUNCTION__ << "....hasJointData numJoints:" << numJoints; @@ -1029,41 +1060,45 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations; - _jointDataRate.increment(); + int numBytesRead = sourceBuffer - startSection; + _jointDataRate.increment(numBytesRead); } int numBytesRead = sourceBuffer - startPosition; _averageBytesReceived.updateAverage(numBytesRead); + + _parseBufferRate.increment(numBytesRead); + return numBytesRead; } float AvatarData::getDataRate(const QString& rateName) { if (rateName == "") { - return _parseBufferRate.rate(); + return _parseBufferRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "globalPosition") { - return _globalPositionRate.rate(); + return 
_globalPositionRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "localPosition") { - return _localPositionRate.rate(); + return _localPositionRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "avatarDimensions") { - return _avatarDimensionRate.rate(); + return _avatarDimensionRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "avatarOrientation") { - return _avatarOrientationRate.rate(); + return _avatarOrientationRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "avatarScale") { - return _avatarScaleRate.rate(); + return _avatarScaleRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "lookAtPosition") { - return _lookAtPositionRate.rate(); + return _lookAtPositionRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "audioLoudness") { - return _audioLoudnessRate.rate(); + return _audioLoudnessRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "sensorToWorkMatrix") { - return _sensorToWorldRate.rate(); + return _sensorToWorldRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "additionalFlags") { - return _additionalFlagsRate.rate(); + return _additionalFlagsRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "parentInfo") { - return _parentInfoRate.rate(); + return _parentInfoRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "faceTracker") { - return _faceTrackerRate.rate(); + return _faceTrackerRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "jointData") { - return _jointDataRate.rate(); + return _jointDataRate.rate() / BYTES_PER_KILOBIT; } return 0.0f; } diff --git a/scripts/developer/debugging/debugAvatarMixer.js b/scripts/developer/debugging/debugAvatarMixer.js new file mode 100644 index 0000000000..2e7901b962 --- /dev/null +++ b/scripts/developer/debugging/debugAvatarMixer.js @@ -0,0 +1,168 @@ +"use strict"; + +// +// debugAvatarMixer.js +// scripts/developer/debugging +// +// Created by Brad Hefta-Gaub on 01/09/2017 +// Copyright 2017 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +/* global Toolbars, Script, Users, Overlays, AvatarList, Controller, Camera, getControllerWorldLocation */ + + +(function() { // BEGIN LOCAL_SCOPE + +Script.include("/~/system/libraries/controllers.js"); + +// grab the toolbar +var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system"); + +var ASSETS_PATH = Script.resolvePath("assets"); +var TOOLS_PATH = Script.resolvePath("assets/images/tools/"); + +function buttonImageURL() { + return TOOLS_PATH + (Users.canKick ? 'kick.svg' : 'ignore.svg'); +} + +// setup the mod button and add it to the toolbar +var button = toolbar.addButton({ + objectName: 'debugAvatarMixer', + imageURL: buttonImageURL(), + visible: true, + buttonState: 1, + defaultState: 1, + hoverState: 3, + alpha: 0.9 +}); + +var isShowingOverlays = false; +var debugOverlays = {}; + +function removeOverlays() { + // enumerate the overlays and remove them + var overlayKeys = Object.keys(debugOverlays); + + for (var i = 0; i < overlayKeys.length; ++i) { + var avatarID = overlayKeys[i]; + for (var j = 0; j < debugOverlays[avatarID].length; ++j) { + Overlays.deleteOverlay(debugOverlays[avatarID][j]); + } + } + + debugOverlays = {}; +} + +// handle clicks on the toolbar button +function buttonClicked(){ + if (isShowingOverlays) { + removeOverlays(); + isShowingOverlays = false; + } else { + isShowingOverlays = true; + } + + button.writeProperty('buttonState', isShowingOverlays ? 
0 : 1); + button.writeProperty('defaultState', isShowingOverlays ? 0 : 1); + button.writeProperty('hoverState', isShowingOverlays ? 2 : 3); +} + +button.clicked.connect(buttonClicked); + +function updateOverlays() { + if (isShowingOverlays) { + + var identifiers = AvatarList.getAvatarIdentifiers(); + + for (var i = 0; i < identifiers.length; ++i) { + var avatarID = identifiers[i]; + + if (avatarID === null) { + // this is our avatar, skip it + continue; + } + + // get the position for this avatar + var avatar = AvatarList.getAvatar(avatarID); + var avatarPosition = avatar && avatar.position; + + if (!avatarPosition) { + // we don't have a valid position for this avatar, skip it + continue; + } + + // setup a position for the overlay that is just above this avatar's head + var overlayPosition = avatar.getJointPosition("Head"); + overlayPosition.y += 1.05; + + var text = " All: " + AvatarManager.getAvatarDataRate(avatarID).toFixed(2) + "\n" + +" GP: " + AvatarManager.getAvatarDataRate(avatarID,"globalPosition").toFixed(2) + "\n" + +" LP: " + AvatarManager.getAvatarDataRate(avatarID,"localPosition").toFixed(2) + "\n" + +" AD: " + AvatarManager.getAvatarDataRate(avatarID,"avatarDimensions").toFixed(2) + "\n" + +" AO: " + AvatarManager.getAvatarDataRate(avatarID,"avatarOrientation").toFixed(2) + "\n" + +" AS: " + AvatarManager.getAvatarDataRate(avatarID,"avatarScale").toFixed(2) + "\n" + +" LA: " + AvatarManager.getAvatarDataRate(avatarID,"lookAtPosition").toFixed(2) + "\n" + +" AL: " + AvatarManager.getAvatarDataRate(avatarID,"audioLoudness").toFixed(2) + "\n" + +" SW: " + AvatarManager.getAvatarDataRate(avatarID,"sensorToWorkMatrix").toFixed(2) + "\n" + +" AF: " + AvatarManager.getAvatarDataRate(avatarID,"additionalFlags").toFixed(2) + "\n" + +" PI: " + AvatarManager.getAvatarDataRate(avatarID,"parentInfo").toFixed(2) + "\n" + +" FT: " + AvatarManager.getAvatarDataRate(avatarID,"faceTracker").toFixed(2) + "\n" + +" JD: " + AvatarManager.getAvatarDataRate(avatarID,"jointData").toFixed(2); + + if (avatarID in debugOverlays) { + // keep the overlay above the current position of this avatar + Overlays.editOverlay(debugOverlays[avatarID][0], { + position: overlayPosition, + text: text + }); + } else { + // add the overlay above this avatar + var newOverlay = Overlays.addOverlay("text3d", { + position: overlayPosition, + dimensions: { + x: 1, + y: 13 * 0.13 + }, + lineHeight: 0.1, + font:{size:0.1}, + text: text, + size: 1, + scale: 0.4, + color: { red: 255, green: 255, blue: 255}, + alpha: 1, + solid: true, + isFacingAvatar: true, + drawInFront: true + }); + + debugOverlays[avatarID]=[newOverlay]; + } + } + } +} + +Script.update.connect(updateOverlays); + +AvatarList.avatarRemovedEvent.connect(function(avatarID){ + if (isShowingOverlays) { + // we are currently showing overlays and an avatar just went away + + // first remove the rendered overlays + for (var j = 0; j < debugOverlays[avatarID].length; ++j) { + Overlays.deleteOverlay(debugOverlays[avatarID][j]); + } + + // delete the saved ID of the overlay from our mod overlays object + delete debugOverlays[avatarID]; + } +}); + +// cleanup the toolbar button and overlays when script is stopped +Script.scriptEnding.connect(function() { + toolbar.removeButton('debugAvatarMixer'); + removeOverlays(); +}); + +}()); // END LOCAL_SCOPE From 8bbfb5141916a01d9822a2d2e5d1be8dbe052c96 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 10 Jan 2017 08:37:01 -0800 Subject: [PATCH 16/43] some cleanup --- 
assignment-client/src/avatars/AvatarMixer.cpp | 4 - .../src/avatars/AvatarMixerClientData.cpp | 1 - libraries/avatars/src/AvatarData.cpp | 78 +------------------ 3 files changed, 1 insertion(+), 82 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 206b9bbdd9..2c8dcb7f8a 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -421,12 +421,8 @@ void AvatarMixer::broadcastAvatarData() { } numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); - - //qDebug() << "about to write data for:" << otherNode->getUUID(); quint64 lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); - //qDebug() << "about to call toByteArray() for:" << otherNode->getUUID() << "last encoded at:" << lastEncodeForOther; auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther); - //qDebug() << "about to call avatarPacketList->write() for:" << otherNode->getUUID() << " bytes:" << bytes.size(); numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.cpp b/assignment-client/src/avatars/AvatarMixerClientData.cpp index b600f7c925..a7a506e1d8 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.cpp +++ b/assignment-client/src/avatars/AvatarMixerClientData.cpp @@ -21,7 +21,6 @@ int AvatarMixerClientData::parseData(ReceivedMessage& message) { message.readPrimitive(&_lastReceivedSequenceNumber); // compute the offset to the data payload - //qDebug() << __FUNCTION__ "about to call parseDataFromBuffer() for:" << getNodeID(); return _avatar->parseDataFromBuffer(message.readWithoutCopy(message.getBytesLeftToRead())); } diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 26a750f0c1..be21abcfd5 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -200,32 +200,18 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent _lastToByteArray = usecTimestampNow(); } - // FIXME - the other "delta" sending seems to work ok, but this culling small data seems to cause - // problems in the sending of joint data... hand waving is awkward - bool cullSmallChanges = false; // (dataDetail == CullSmallData); + bool cullSmallChanges = (dataDetail == CullSmallData); bool sendAll = (dataDetail == SendAllData); bool sendMinimum = (dataDetail == MinimumData); - //sendAll = true; // FIXME -- hack-o-rama - - // TODO: DRY this up to a shared method - // that can pack any type given the number of bytes - // and return the number of bytes to push the pointer lazyInitHeadData(); QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); unsigned char* startPosition = destinationBuffer; - // psuedo code.... - // - determine which sections will be included - // - create the packet has flags - // - include each section in order - // FIXME - things to consider // - // - cullSmallChanges is broken... needs to be repaired... <<<<<<<<<<<<<<< top issue - // // - how to dry up this code? 
// // - the sections below are basically little repeats of each other, where they @@ -276,16 +262,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; - /* - qDebug() << __FUNCTION__ << "sendAll:" << sendAll - << "sendMinimum:" << sendMinimum - << "hasJointData:" << hasJointData - << "cullSmallChanges:" << cullSmallChanges; - */ - - //qDebug() << "hasAvatarGlobalPosition:" << hasAvatarGlobalPosition; - //qDebug() << "hasAvatarOrientation:" << hasAvatarOrientation; - // Leading flags, to indicate how much data is actually included in the packet... AvatarDataPacket::HasFlags packetStateFlags = (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0) @@ -301,16 +277,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); - //qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "lastSentTime:" << lastSentTime; - - /* - qDebug() << "..." << "tranlationChangedSince():" << tranlationChangedSince(lastSentTime); - qDebug() << "..." << "rotationChangedSince():" << rotationChangedSince(lastSentTime); - qDebug() << "..." << "lookAtPositionChangedSince():" << lookAtPositionChangedSince(lastSentTime); - qDebug() << "..." << "audioLoudnessChangedSince():" << audioLoudnessChangedSince(lastSentTime); - qDebug() << "..." << "parentInfoChangedSince():" << parentInfoChangedSince(lastSentTime); - */ - memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); @@ -320,7 +286,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->globalPosition[1] = _globalPosition.y; data->globalPosition[2] = _globalPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); - //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; } // FIXME - I was told by tony this was "skeletal model position"-- but it seems to be @@ -334,7 +299,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->localPosition[1] = localPosition.y; data->localPosition[2] = localPosition.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - //qDebug() << "hasAvatarLocalPosition localPosition:" << localPosition; } if (hasAvatarDimensions) { @@ -346,7 +310,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->avatarDimensions[1] = avatarDimensions.y; data->avatarDimensions[2] = avatarDimensions.z; destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - //qDebug() << "hasAvatarDimensions avatarDimensions:" << avatarDimensions; } if (hasAvatarOrientation) { @@ -357,7 +320,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - //qDebug() << "hasAvatarOrientation bodyEulerAngles:" << bodyEulerAngles; } if (hasAvatarScale) { @@ -365,7 +327,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent auto scale = getDomainLimitedScale(); 
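// [Editor's note, not part of the patch: the scale travels as the two-byte 'smallFloat'
// declared in AvatarDataPacket::AvatarScale (AVATAR_SCALE_SIZE = 2). packFloatRatioToTwoByte()
// is the compressor named in that struct's comment, and its 'ratio' encoding uses the sign
// bit as a flag, so the field is not a plain 16-bit fixed-point value like the
// TRANSLATION_COMPRESSION_RADIX fixed-point fields elsewhere in the packet.]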
packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale); destinationBuffer += sizeof(AvatarDataPacket::AvatarScale); - //qDebug() << "hasAvatarScale scale:" << scale; } if (hasLookAtPosition) { @@ -375,7 +336,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->lookAtPosition[1] = lookAt.y; data->lookAtPosition[2] = lookAt.z; destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); - //qDebug() << "hasLookAtPosition lookAt:" << lookAt; } if (hasAudioLoudness) { @@ -383,7 +343,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS); packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); - //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; } if (hasSensorToWorldMatrix) { @@ -396,7 +355,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - //qDebug() << "hasSensorToWorldMatrix..."; } QUuid parentID = getParentID(); @@ -428,12 +386,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent } data->flags = flags; destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); - - //qDebug() << "hasAdditionalFlags _keyState:" << _keyState; - //qDebug() << "hasAdditionalFlags _handState:" << _handState; - //qDebug() << "hasAdditionalFlags _isFaceTrackerConnected:" << _headData->_isFaceTrackerConnected; - //qDebug() << "hasAdditionalFlags _isEyeTrackerConnected:" << _headData->_isEyeTrackerConnected; - //qDebug() << "hasAdditionalFlags bitItems:" << flags; } if (hasParentInfo) { @@ -442,7 +394,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); parentInfo->parentJointIndex = _parentJointIndex; destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); - //qDebug() << "hasParentInfo ...:"; } // If it is connected, pack up the data @@ -459,7 +410,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // followed by a variable number of float coefficients memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); - //qDebug() << "hasFaceTrackerInfo ...:"; } // If it is connected, pack up the data @@ -474,8 +424,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent int numJoints = _jointData.size(); *destinationBuffer++ = (uint8_t)numJoints; - //qDebug() << "hasJointData numJoints:" << numJoints; - unsigned char* validityPosition = destinationBuffer; unsigned char validity = 0; int validityBit = 0; @@ -604,7 +552,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent } int avatarDataSize = destinationBuffer - startPosition; - //qDebug() << "avatarDataSize:" << avatarDataSize; return avatarDataByteArray.left(avatarDataSize); } @@ -686,8 +633,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); sourceBuffer += sizeof(packetStateFlags); - 
//qDebug() << __FUNCTION__ << "packetStateFlags:" << packetStateFlags << "buffer size:" << buffer.size(); - #define HAS_FLAG(B,F) ((B & F) == F) bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION); @@ -717,8 +662,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _globalPositionChanged = usecTimestampNow(); } sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); - //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition; - int numBytesRead = sourceBuffer - startSection; _globalPositionRate.increment(numBytesRead); } @@ -737,8 +680,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setLocalPosition(position); sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - //qDebug() << "hasAvatarLocalPosition position:" << position; - int numBytesRead = sourceBuffer - startSection; _localPositionRate.increment(numBytesRead); } @@ -755,8 +696,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _avatarDimensionsChanged = usecTimestampNow(); } sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); - //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner; - int numBytesRead = sourceBuffer - startSection; _avatarDimensionRate.increment(numBytesRead); } @@ -785,8 +724,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { setLocalOrientation(newOrientation); } sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation; - int numBytesRead = sourceBuffer - startSection; _avatarOrientationRate.increment(numBytesRead); } @@ -806,8 +743,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } setTargetScale(scale); sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); - //qDebug() << "hasAvatarOrientation scale:" << scale; - int numBytesRead = sourceBuffer - startSection; _avatarScaleRate.increment(numBytesRead); } @@ -826,8 +761,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } _headData->setLookAtPosition(lookAt); sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); - //qDebug() << "hasLookAtPosition lookAt:" << lookAt; - int numBytesRead = sourceBuffer - startSection; _lookAtPositionRate.increment(numBytesRead); } @@ -848,8 +781,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } _headData->setAudioLoudness(audioLoudness); sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); - //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness; - int numBytesRead = sourceBuffer - startSection; _audioLoudnessRate.increment(numBytesRead); } @@ -870,8 +801,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _sensorToWorldMatrixChanged = usecTimestampNow(); } sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); - //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix; - int numBytesRead = sourceBuffer - startSection; _sensorToWorldRate.increment(numBytesRead); } @@ -962,8 +891,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); sourceBuffer += coefficientsSize; - //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients; - int numBytesRead = sourceBuffer - startSection; _faceTrackerRate.increment(numBytesRead); } @@ -973,8 +900,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); int numJoints = *sourceBuffer++; - //qDebug() << __FUNCTION__ << "....hasJointData numJoints:" << numJoints; - const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); @@ -1059,7 +984,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations; - int numBytesRead = sourceBuffer - startSection; _jointDataRate.increment(numBytesRead); } From 182edf0e9ae11add375821fc7c7aa314e58da0b0 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 10 Jan 2017 09:41:11 -0800 Subject: [PATCH 17/43] change avatarOrientation to use SixByteQuat packing --- libraries/avatars/src/AvatarData.cpp | 17 +++++++++++++---- libraries/avatars/src/AvatarData.h | 8 ++++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index be21abcfd5..e70e73f8b5 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -313,13 +313,16 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent } if (hasAvatarOrientation) { - auto data = reinterpret_cast<AvatarDataPacket::AvatarOrientation*>(destinationBuffer); auto localOrientation = getLocalOrientation(); + /* + auto data = reinterpret_cast<AvatarDataPacket::AvatarOrientation*>(destinationBuffer); glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(localOrientation)); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 0), bodyEulerAngles.y); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); + */ + destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, localOrientation); } if (hasAvatarScale) { @@ -704,6 +707,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { auto startSection = sourceBuffer; PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation)); + + /* auto data = reinterpret_cast<const AvatarDataPacket::AvatarOrientation*>(sourceBuffer); float pitch, yaw, roll; unpackFloatAngleFromTwoByte(data->localOrientation + 0, &yaw); @@ -715,15 +720,19 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } return buffer.size(); } - - glm::quat currentOrientation = getLocalOrientation(); glm::vec3 newEulerAngles(pitch, yaw, roll); glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles)); + sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); + */ + + glm::quat newOrientation; + sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, newOrientation); + + glm::quat currentOrientation = getLocalOrientation(); if (currentOrientation != newOrientation) { _hasNewJointRotations = true; setLocalOrientation(newOrientation); 
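// [Editor's note, not part of the patch: patch 17 swaps the body orientation encoding from
// three compressed Euler angles (3 x uint16_t) to a SixByteQuat written with
// packOrientationQuatToSixBytes() and read back by unpackOrientationQuatFromSixBytes() just
// above. The wire size is unchanged (AVATAR_ORIENTATION_SIZE stays 6 bytes), but the
// quaternion round-trip drops the Euler conversion and the NaN checks that the old,
// now commented-out path needed.]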
} - sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); int numBytesRead = sourceBuffer - startSection; _avatarOrientationRate.increment(numBytesRead); } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index bb6135dc7e..a31166920a 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -177,9 +177,13 @@ namespace AvatarDataPacket { const size_t AVATAR_DIMENSIONS_SIZE = 12; + using SixByteQuat = uint8_t[6]; PACKED_BEGIN struct AvatarOrientation { - smallFloat localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the - // thing it's attached to, or world relative if not attached + //smallFloat localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the + // thing it's attached to, or world relative if not attached + + SixByteQuat avatarOrientation; // encodeded and compressed by packOrientationQuatToSixBytes() + } PACKED_END; const size_t AVATAR_ORIENTATION_SIZE = 6; From 407ad633e0facf8143dd17722bfa0fc384853f92 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Wed, 11 Jan 2017 11:01:49 -0800 Subject: [PATCH 18/43] more cleanup use kens 1 byte audio gain --- libraries/avatars/src/AvatarData.cpp | 35 +++------------------------- libraries/avatars/src/AvatarData.h | 10 +++----- 2 files changed, 6 insertions(+), 39 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index e70e73f8b5..5a781340a1 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -35,6 +35,7 @@ #include <UUID.h> #include <shared/JSONHelpers.h> #include <ShapeInfo.h> +#include <AudioHelpers.h> #include "AvatarLogging.h" @@ -314,14 +315,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAvatarOrientation) { auto localOrientation = getLocalOrientation(); - /* - auto data = reinterpret_cast<AvatarDataPacket::AvatarOrientation*>(destinationBuffer); - glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(localOrientation)); - packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 0), bodyEulerAngles.y); - packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 1), bodyEulerAngles.x); - packFloatAngleToTwoByte((uint8_t*)(data->localOrientation + 2), bodyEulerAngles.z); - destinationBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - */ destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, localOrientation); } @@ -343,8 +336,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS); - packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); + data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness()); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); } @@ -705,29 +697,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { if (hasAvatarOrientation) { auto startSection = sourceBuffer; - PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation)); - - /* - auto data = reinterpret_cast<const AvatarDataPacket::AvatarOrientation*>(sourceBuffer); - float pitch, yaw, roll; - unpackFloatAngleFromTwoByte(data->localOrientation + 0, &yaw); - unpackFloatAngleFromTwoByte(data->localOrientation + 1, 
&pitch); - unpackFloatAngleFromTwoByte(data->localOrientation + 2, &roll); - if (isNaN(yaw) || isNaN(pitch) || isNaN(roll)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: localOriention is NaN, uuid " << getSessionUUID(); - } - return buffer.size(); - } - glm::vec3 newEulerAngles(pitch, yaw, roll); - glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles)); - sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation); - */ - glm::quat newOrientation; sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, newOrientation); - glm::quat currentOrientation = getLocalOrientation(); if (currentOrientation != newOrientation) { _hasNewJointRotations = true; @@ -779,8 +751,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); - float audioLoudness; - unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); + float audioLoudness = unpackFloatGainFromByte(data->audioLoudness); if (isNaN(audioLoudness)) { if (shouldLogError(now)) { diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index a31166920a..7e3a9f2923 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -179,16 +179,12 @@ namespace AvatarDataPacket { using SixByteQuat = uint8_t[6]; PACKED_BEGIN struct AvatarOrientation { - //smallFloat localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the - // thing it's attached to, or world relative if not attached - SixByteQuat avatarOrientation; // encodeded and compressed by packOrientationQuatToSixBytes() - } PACKED_END; const size_t AVATAR_ORIENTATION_SIZE = 6; PACKED_BEGIN struct AvatarScale { - smallFloat scale; // avatar's scale, (compressed) 'ratio' encoding uses sign bit as flag. 
+ smallFloat scale; // avatar's scale, compressed by packFloatRatioToTwoByte() } PACKED_END; const size_t AVATAR_SCALE_SIZE = 2; @@ -204,9 +200,9 @@ namespace AvatarDataPacket { const size_t LOOK_AT_POSITION_SIZE = 12; PACKED_BEGIN struct AudioLoudness { - smallFloat audioLoudness; // current loudness of microphone, (compressed) + uint8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() } PACKED_END; - const size_t AUDIO_LOUDNESS_SIZE = 2; + const size_t AUDIO_LOUDNESS_SIZE = 1; PACKED_BEGIN struct SensorToWorldMatrix { // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments" From 0661531e3a95a7ecae96f489b480646d8f206cad Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Wed, 11 Jan 2017 17:51:12 -0800 Subject: [PATCH 19/43] try another version of an 8bit audio loudness --- libraries/avatars/src/AvatarData.cpp | 13 ++++++++----- libraries/avatars/src/AvatarData.h | 2 +- libraries/shared/src/GLMHelpers.cpp | 12 ++++++++++++ libraries/shared/src/GLMHelpers.h | 4 ++++ 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 5a781340a1..0b9df8f7a1 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -35,7 +35,6 @@ #include <UUID.h> #include <shared/JSONHelpers.h> #include <ShapeInfo.h> -#include <AudioHelpers.h> #include "AvatarLogging.h" @@ -53,6 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const int AUDIO_LOUDNESS_RADIX = 2; +static const float AUDIO_LOUDNESS_SCALE = 10.0f; //static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -336,8 +336,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness()); - destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); + + auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS) / AUDIO_LOUDNESS_SCALE; + destinationBuffer += packFloatScalarToSignedOneByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); } if (hasSensorToWorldMatrix) { @@ -751,7 +752,9 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); - float audioLoudness = unpackFloatGainFromByte(data->audioLoudness); + float audioLoudness; + sourceBuffer += unpackFloatScalarFromSignedOneByteFixed(&data->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); + audioLoudness *= AUDIO_LOUDNESS_SCALE; if (isNaN(audioLoudness)) { if (shouldLogError(now)) { @@ -760,7 +763,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return buffer.size(); } _headData->setAudioLoudness(audioLoudness); - sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); + qDebug() << "audioLoudness:" << audioLoudness; int numBytesRead = sourceBuffer - startSection; _audioLoudnessRate.increment(numBytesRead); } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 7e3a9f2923..36dc52cf9c 100644 --- a/libraries/avatars/src/AvatarData.h 
+++ b/libraries/avatars/src/AvatarData.h @@ -200,7 +200,7 @@ namespace AvatarDataPacket { const size_t LOOK_AT_POSITION_SIZE = 12; PACKED_BEGIN struct AudioLoudness { - uint8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() + int8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() } PACKED_END; const size_t AUDIO_LOUDNESS_SIZE = 1; diff --git a/libraries/shared/src/GLMHelpers.cpp b/libraries/shared/src/GLMHelpers.cpp index ec244553f8..2cf64ed4f8 100644 --- a/libraries/shared/src/GLMHelpers.cpp +++ b/libraries/shared/src/GLMHelpers.cpp @@ -81,6 +81,18 @@ int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, flo return sizeof(int16_t); } +// Allows sending of fixed-point numbers: radix 1 makes 15.1 number, radix 8 makes 8.8 number, etc +int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix) { + int8_t outVal = (int8_t)(scalar * (float)(1 << radix)); + memcpy(buffer, &outVal, sizeof(uint16_t)); + return sizeof(outVal); +} + +int unpackFloatScalarFromSignedOneByteFixed(const int8_t* byteFixedPointer, float* destinationPointer, int radix) { + *destinationPointer = *byteFixedPointer / (float)(1 << radix); + return sizeof(int8_t); +} + int packFloatVec3ToSignedTwoByteFixed(unsigned char* destBuffer, const glm::vec3& srcVector, int radix) { const unsigned char* startPosition = destBuffer; destBuffer += packFloatScalarToSignedTwoByteFixed(destBuffer, srcVector.x, radix); diff --git a/libraries/shared/src/GLMHelpers.h b/libraries/shared/src/GLMHelpers.h index 4aac913768..d6868e1f1a 100644 --- a/libraries/shared/src/GLMHelpers.h +++ b/libraries/shared/src/GLMHelpers.h @@ -125,6 +125,10 @@ int unpackFloatFromByte(const unsigned char* buffer, float& value, float scaleBy int packFloatScalarToSignedTwoByteFixed(unsigned char* buffer, float scalar, int radix); int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, float* destinationPointer, int radix); +// Allows sending of fixed-point numbers: radix 1 makes 7.1 number, radix 4 makes 4.4 number, etc +int unpackFloatScalarFromSignedOneByteFixed(const int8_t* byteFixedPointer, float* destinationPointer, int radix); +int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix); + // A convenience for sending vec3's as fixed-point floats int packFloatVec3ToSignedTwoByteFixed(unsigned char* destBuffer, const glm::vec3& srcVector, int radix); int unpackFloatVec3FromSignedTwoByteFixed(const unsigned char* sourceBuffer, glm::vec3& destination, int radix); From fe06dfdca7d948ad0dccb5ea644269f17d7f748e Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Wed, 11 Jan 2017 18:58:43 -0800 Subject: [PATCH 20/43] tweak audioLoudness packing --- libraries/avatars/src/AvatarData.cpp | 2 +- libraries/avatars/src/AvatarData.h | 2 +- libraries/shared/src/GLMHelpers.cpp | 8 ++++---- libraries/shared/src/GLMHelpers.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 0b9df8f7a1..8c4b31587b 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -52,7 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const int AUDIO_LOUDNESS_RADIX = 2; -static const float AUDIO_LOUDNESS_SCALE = 10.0f; +static const 
float AUDIO_LOUDNESS_SCALE = 20.0f; //static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 36dc52cf9c..7e3a9f2923 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -200,7 +200,7 @@ namespace AvatarDataPacket { const size_t LOOK_AT_POSITION_SIZE = 12; PACKED_BEGIN struct AudioLoudness { - int8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() + uint8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() } PACKED_END; const size_t AUDIO_LOUDNESS_SIZE = 1; diff --git a/libraries/shared/src/GLMHelpers.cpp b/libraries/shared/src/GLMHelpers.cpp index 2cf64ed4f8..85b2e1f57e 100644 --- a/libraries/shared/src/GLMHelpers.cpp +++ b/libraries/shared/src/GLMHelpers.cpp @@ -83,14 +83,14 @@ int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, flo // Allows sending of fixed-point numbers: radix 1 makes 15.1 number, radix 8 makes 8.8 number, etc int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix) { - int8_t outVal = (int8_t)(scalar * (float)(1 << radix)); - memcpy(buffer, &outVal, sizeof(uint16_t)); + uint8_t outVal = (uint8_t)(scalar * (float)(1 << radix)); + memcpy(buffer, &outVal, sizeof(uint8_t)); return sizeof(outVal); } -int unpackFloatScalarFromSignedOneByteFixed(const int8_t* byteFixedPointer, float* destinationPointer, int radix) { +int unpackFloatScalarFromSignedOneByteFixed(const uint8_t* byteFixedPointer, float* destinationPointer, int radix) { *destinationPointer = *byteFixedPointer / (float)(1 << radix); - return sizeof(int8_t); + return sizeof(uint8_t); } int packFloatVec3ToSignedTwoByteFixed(unsigned char* destBuffer, const glm::vec3& srcVector, int radix) { diff --git a/libraries/shared/src/GLMHelpers.h b/libraries/shared/src/GLMHelpers.h index d6868e1f1a..ed84e45ad8 100644 --- a/libraries/shared/src/GLMHelpers.h +++ b/libraries/shared/src/GLMHelpers.h @@ -126,7 +126,7 @@ int packFloatScalarToSignedTwoByteFixed(unsigned char* buffer, float scalar, int int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, float* destinationPointer, int radix); // Allows sending of fixed-point numbers: radix 1 makes 7.1 number, radix 4 makes 4.4 number, etc -int unpackFloatScalarFromSignedOneByteFixed(const int8_t* byteFixedPointer, float* destinationPointer, int radix); +int unpackFloatScalarFromSignedOneByteFixed(const uint8_t* byteFixedPointer, float* destinationPointer, int radix); int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix); // A convenience for sending vec3's as fixed-point floats From 0e600fc8fd976cc641e6d9c41ae914b6d1ff4943 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 12 Jan 2017 07:56:09 -0800 Subject: [PATCH 21/43] more audio loudness tweaks --- libraries/avatars/src/AvatarData.cpp | 14 ++++++++------ libraries/avatars/src/AvatarData.h | 4 +++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 8c4b31587b..9cb0eeda25 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -52,7 +52,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 
10; static const int AUDIO_LOUDNESS_RADIX = 2; -static const float AUDIO_LOUDNESS_SCALE = 20.0f; +static const float AUDIO_LOUDNESS_SCALE = 4.0f; //static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -234,10 +234,12 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // TODO - // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp // + // 4) AudioLoudness - 8bit encoding, clamp to 1000 and / 4.0f - 1 byte - 0.36 kpbs (when speaking) + // + // // 1) make the dimensions really be dimensions instead of corner - 12 bytes - 4.32 kbps (when moving) // 2) determine if local position really only matters for parent - 12 bytes - 4.32 kbps (when moving and/or not parented) // 3) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps - // 4) AudioLoudness - use Ken's 8bit encoding - 1 byte - 0.36 kpbs (when speaking) // 5) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps // // ----- Subtotal -- non-joint savings --- ~21.2 kbps --- ~12.8% savings? @@ -337,8 +339,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - auto audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS) / AUDIO_LOUDNESS_SCALE; - destinationBuffer += packFloatScalarToSignedOneByteFixed((uint8_t*)&data->audioLoudness, audioLoudness, AUDIO_LOUDNESS_RADIX); + data->audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS) / AUDIO_LOUDNESS_SCALE; + destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); } if (hasSensorToWorldMatrix) { @@ -753,8 +755,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); float audioLoudness; - sourceBuffer += unpackFloatScalarFromSignedOneByteFixed(&data->audioLoudness, &audioLoudness, AUDIO_LOUDNESS_RADIX); - audioLoudness *= AUDIO_LOUDNESS_SCALE; + audioLoudness = data->audioLoudness * AUDIO_LOUDNESS_SCALE; + sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); if (isNaN(audioLoudness)) { if (shouldLogError(now)) { diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 7e3a9f2923..38b90ac726 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -200,7 +200,9 @@ namespace AvatarDataPacket { const size_t LOOK_AT_POSITION_SIZE = 12; PACKED_BEGIN struct AudioLoudness { - uint8_t audioLoudness; // current loudness of microphone, compressed by packFloatGainToByte() + uint8_t audioLoudness; // current loudness of microphone, clamped to MAX_AUDIO_LOUDNESS and + // scaled by AUDIO_LOUDNESS_SCALE typical values 0 to 255 or once + // rescaled 0.0 to 1000.0 } PACKED_END; const size_t AUDIO_LOUDNESS_SIZE = 1; From dd85cd95586148bdb22c8ffe19c0d04a6b626330 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 12 Jan 2017 08:48:33 -0800 Subject: [PATCH 22/43] one more shot at kens way --- libraries/avatars/src/AvatarData.cpp | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 9cb0eeda25..b277f99c5d 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -35,6 +35,7 
@@ #include <UUID.h> #include <shared/JSONHelpers.h> #include <ShapeInfo.h> +#include <AudioHelpers.h> #include "AvatarLogging.h" @@ -52,7 +53,7 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const int AUDIO_LOUDNESS_RADIX = 2; -static const float AUDIO_LOUDNESS_SCALE = 4.0f; +static const float AUDIO_LOUDNESS_SCALE = 1024.0f; //static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -234,9 +235,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // TODO - // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp // - // 4) AudioLoudness - 8bit encoding, clamp to 1000 and / 4.0f - 1 byte - 0.36 kpbs (when speaking) - // - // // 1) make the dimensions really be dimensions instead of corner - 12 bytes - 4.32 kbps (when moving) // 2) determine if local position really only matters for parent - 12 bytes - 4.32 kbps (when moving and/or not parented) // 3) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps @@ -338,8 +336,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - - data->audioLoudness = glm::min(_headData->getAudioLoudness(), MAX_AUDIO_LOUDNESS) / AUDIO_LOUDNESS_SCALE; + data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness() * (1 / AUDIO_LOUDNESS_SCALE)); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); } @@ -755,7 +752,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); float audioLoudness; - audioLoudness = data->audioLoudness * AUDIO_LOUDNESS_SCALE; + audioLoudness = unpackFloatGainFromByte(data->audioLoudness * AUDIO_LOUDNESS_SCALE); sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); if (isNaN(audioLoudness)) { From aed1b69ee0289bc9e7b1a2ad8f258153ac51ee03 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 12 Jan 2017 17:19:06 -0800 Subject: [PATCH 23/43] make the avatar bounding box be avatar local and only change when it actually changes --- interface/src/avatar/MyAvatar.cpp | 8 +-- libraries/avatars/src/AvatarData.cpp | 65 +++++++++++-------- libraries/avatars/src/AvatarData.h | 28 ++++---- .../developer/debugging/debugAvatarMixer.js | 2 +- 4 files changed, 56 insertions(+), 47 deletions(-) diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 97ffd45587..6f5de308ac 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -229,10 +229,10 @@ void MyAvatar::simulateAttachments(float deltaTime) { QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) { CameraMode mode = qApp->getCamera()->getMode(); _globalPosition = getPosition(); - _globalBoundingBoxCorner.x = _characterController.getCapsuleRadius(); - _globalBoundingBoxCorner.y = _characterController.getCapsuleHalfHeight(); - _globalBoundingBoxCorner.z = _characterController.getCapsuleRadius(); - _globalBoundingBoxCorner += _characterController.getCapsuleLocalOffset(); + _globalBoundingBoxDimensions.x = _characterController.getCapsuleRadius(); + _globalBoundingBoxDimensions.y = 
_characterController.getCapsuleHalfHeight(); + _globalBoundingBoxDimensions.z = _characterController.getCapsuleRadius(); + _globalBoundingBoxOffset = _characterController.getCapsuleLocalOffset(); if (mode == CAMERA_MODE_THIRD_PERSON || mode == CAMERA_MODE_INDEPENDENT) { // fake the avatar position that is sent up to the AvatarMixer glm::vec3 oldPosition = getPosition(); diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index b277f99c5d..2427198a3e 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -82,7 +82,7 @@ AvatarData::AvatarData() : ASSERT(sizeof(AvatarDataPacket::Header) == AvatarDataPacket::HEADER_SIZE); ASSERT(sizeof(AvatarDataPacket::AvatarGlobalPosition) == AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE); ASSERT(sizeof(AvatarDataPacket::AvatarLocalPosition) == AvatarDataPacket::AVATAR_LOCAL_POSITION_SIZE); - ASSERT(sizeof(AvatarDataPacket::AvatarDimensions) == AvatarDataPacket::AVATAR_DIMENSIONS_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarBoundingBox) == AvatarDataPacket::AVATAR_BOUNDING_BOX_SIZE); ASSERT(sizeof(AvatarDataPacket::AvatarOrientation) == AvatarDataPacket::AVATAR_ORIENTATION_SIZE); ASSERT(sizeof(AvatarDataPacket::AvatarScale) == AvatarDataPacket::AVATAR_SCALE_SIZE); ASSERT(sizeof(AvatarDataPacket::LookAtPosition) == AvatarDataPacket::LOOK_AT_POSITION_SIZE); @@ -161,8 +161,8 @@ void AvatarData::lazyInitHeadData() { } -bool AvatarData::avatarDimensionsChangedSince(quint64 time) { - return _avatarDimensionsChanged >= time; +bool AvatarData::avatarBoundingBoxChangedSince(quint64 time) { + return _avatarBoundingBoxChanged >= time; } bool AvatarData::avatarScaleChangedSince(quint64 time) { @@ -235,7 +235,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // TODO - // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp // - // 1) make the dimensions really be dimensions instead of corner - 12 bytes - 4.32 kbps (when moving) // 2) determine if local position really only matters for parent - 12 bytes - 4.32 kbps (when moving and/or not parented) // 3) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps // 5) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps @@ -253,7 +252,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent bool hasAvatarLocalPosition = sendAll || tranlationChangedSince(lastSentTime); bool hasAvatarOrientation = sendAll || rotationChangedSince(lastSentTime); - bool hasAvatarDimensions = sendAll || avatarDimensionsChangedSince(lastSentTime); + bool hasAvatarBoundingBox = sendAll || avatarBoundingBoxChangedSince(lastSentTime); bool hasAvatarScale = sendAll || avatarScaleChangedSince(lastSentTime); bool hasLookAtPosition = sendAll || lookAtPositionChangedSince(lastSentTime); bool hasAudioLoudness = sendAll || audioLoudnessChangedSince(lastSentTime); @@ -267,7 +266,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent AvatarDataPacket::HasFlags packetStateFlags = (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0) | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0) - | (hasAvatarDimensions ? AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS : 0) + | (hasAvatarBoundingBox ? AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX : 0) | (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0) | (hasAvatarScale ? 
AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0) | (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0) @@ -302,15 +301,18 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); } - if (hasAvatarDimensions) { - auto data = reinterpret_cast<AvatarDataPacket::AvatarDimensions*>(destinationBuffer); + if (hasAvatarBoundingBox) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarBoundingBox*>(destinationBuffer); - // FIXME - make this just dimensions!!! - auto avatarDimensions = getPosition() - _globalBoundingBoxCorner; - data->avatarDimensions[0] = avatarDimensions.x; - data->avatarDimensions[1] = avatarDimensions.y; - data->avatarDimensions[2] = avatarDimensions.z; - destinationBuffer += sizeof(AvatarDataPacket::AvatarDimensions); + data->avatarDimensions[0] = _globalBoundingBoxDimensions.x; + data->avatarDimensions[1] = _globalBoundingBoxDimensions.y; + data->avatarDimensions[2] = _globalBoundingBoxDimensions.z; + + data->boundOriginOffset[0] = _globalBoundingBoxOffset.x; + data->boundOriginOffset[1] = _globalBoundingBoxOffset.y; + data->boundOriginOffset[2] = _globalBoundingBoxOffset.z; + + destinationBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox); } if (hasAvatarOrientation) { @@ -632,7 +634,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION); bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION); - bool hasAvatarDimensions = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_DIMENSIONS); + bool hasAvatarBoundingBox = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX); bool hasAvatarOrientation = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION); bool hasAvatarScale = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_SCALE); bool hasLookAtPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION); @@ -679,20 +681,27 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _localPositionRate.increment(numBytesRead); } - if (hasAvatarDimensions) { + if (hasAvatarBoundingBox) { auto startSection = sourceBuffer; - PACKET_READ_CHECK(AvatarDimensions, sizeof(AvatarDataPacket::AvatarDimensions)); - auto data = reinterpret_cast<const AvatarDataPacket::AvatarDimensions*>(sourceBuffer); - auto newValue = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); - // FIXME - this is suspicious looking! 
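// ---------------------------------------------------------------------------------------
// [editor's note] Illustrative sketch only -- not part of the patch series. It restates the
// AvatarBoundingBox round trip introduced above under simplified assumptions: a raw byte
// buffer in place of the real packet list, std::memcpy in place of the PACKED structs, and
// names ending in "Sketch" are hypothetical. The field layout (three floats of dimensions
// followed by three floats of origin offset, 24 bytes total) follows the patch's
// AVATAR_BOUNDING_BOX_SIZE.

#include <cstdint>
#include <cstring>
#include <glm/vec3.hpp>

struct AvatarBoundingBoxSketch {
    float avatarDimensions[3];   // width / height / depth of the box, world-space units
    float boundOriginOffset[3];  // offset from the avatar position to the box origin
};
static_assert(sizeof(AvatarBoundingBoxSketch) == 24, "matches AVATAR_BOUNDING_BOX_SIZE");

// pack: copy the two vec3s into the record and advance the write cursor
inline uint8_t* writeBoundingBoxSketch(uint8_t* dest, const glm::vec3& dims, const glm::vec3& offset) {
    AvatarBoundingBoxSketch record;
    record.avatarDimensions[0] = dims.x;    record.avatarDimensions[1] = dims.y;    record.avatarDimensions[2] = dims.z;
    record.boundOriginOffset[0] = offset.x; record.boundOriginOffset[1] = offset.y; record.boundOriginOffset[2] = offset.z;
    std::memcpy(dest, &record, sizeof(record));
    return dest + sizeof(record);
}

// unpack: recover the two vec3s and advance the read cursor; in the patch the caller then
// compares against its cached values and bumps _avatarBoundingBoxChanged only on a real change.
inline const uint8_t* readBoundingBoxSketch(const uint8_t* src, glm::vec3& dims, glm::vec3& offset) {
    AvatarBoundingBoxSketch record;
    std::memcpy(&record, src, sizeof(record));
    dims   = glm::vec3(record.avatarDimensions[0], record.avatarDimensions[1], record.avatarDimensions[2]);
    offset = glm::vec3(record.boundOriginOffset[0], record.boundOriginOffset[1], record.boundOriginOffset[2]);
    return src + sizeof(record);
}
// ---------------------------------------------------------------------------------------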
- if (_globalBoundingBoxCorner != newValue) { - _globalBoundingBoxCorner = newValue; - _avatarDimensionsChanged = usecTimestampNow(); + PACKET_READ_CHECK(AvatarBoundingBox, sizeof(AvatarDataPacket::AvatarBoundingBox)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarBoundingBox*>(sourceBuffer); + auto newDimensions = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); + auto newOffset = glm::vec3(data->boundOriginOffset[0], data->boundOriginOffset[1], data->boundOriginOffset[2]); + + + if (_globalBoundingBoxDimensions != newDimensions) { + _globalBoundingBoxDimensions = newDimensions; + _avatarBoundingBoxChanged = usecTimestampNow(); } - sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions); + if (_globalBoundingBoxOffset != newOffset) { + _globalBoundingBoxOffset = newOffset; + _avatarBoundingBoxChanged = usecTimestampNow(); + } + + sourceBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox); int numBytesRead = sourceBuffer - startSection; - _avatarDimensionRate.increment(numBytesRead); + _avatarBoundingBoxRate.increment(numBytesRead); } if (hasAvatarOrientation) { @@ -762,7 +771,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return buffer.size(); } _headData->setAudioLoudness(audioLoudness); - qDebug() << "audioLoudness:" << audioLoudness; + //qDebug() << "audioLoudness:" << audioLoudness; int numBytesRead = sourceBuffer - startSection; _audioLoudnessRate.increment(numBytesRead); } @@ -985,8 +994,8 @@ float AvatarData::getDataRate(const QString& rateName) { return _globalPositionRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "localPosition") { return _localPositionRate.rate() / BYTES_PER_KILOBIT; - } else if (rateName == "avatarDimensions") { - return _avatarDimensionRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "avatarBoundingBox") { + return _avatarBoundingBoxRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "avatarOrientation") { return _avatarOrientationRate.rate() / BYTES_PER_KILOBIT; } else if (rateName == "avatarScale") { diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 38b90ac726..ce604634b7 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -127,7 +127,7 @@ namespace AvatarDataPacket { using HasFlags = uint16_t; const HasFlags PACKET_HAS_AVATAR_GLOBAL_POSITION = 1U << 0; const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 1; // FIXME - can this be in the PARENT_INFO?? - const HasFlags PACKET_HAS_AVATAR_DIMENSIONS = 1U << 2; + const HasFlags PACKET_HAS_AVATAR_BOUNDING_BOX = 1U << 2; const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 3; const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 4; const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 5; @@ -144,7 +144,7 @@ namespace AvatarDataPacket { HasFlags packetHasFlags; // state flags, indicated which additional records are included in the packet // bit 0 - has AvatarGlobalPosition // bit 1 - has AvatarLocalPosition - // bit 2 - has AvatarDimensions + // bit 2 - has AvatarBoundingBox // bit 3 - has AvatarOrientation // bit 4 - has AvatarScale // bit 5 - has LookAtPosition @@ -164,17 +164,16 @@ namespace AvatarDataPacket { PACKED_BEGIN struct AvatarLocalPosition { float localPosition[3]; // this appears to be the avatar local position?? - // this is a reduced precision radix - // FIXME - could this be changed into compressed floats? + // this is a reduced precision radix + // FIXME - could this be changed into compressed floats? 
} PACKED_END; const size_t AVATAR_LOCAL_POSITION_SIZE = 12; - PACKED_BEGIN struct AvatarDimensions { - float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the - // position. Assumed to be centered around the world position - // FIXME - could this be changed into compressed floats? + PACKED_BEGIN struct AvatarBoundingBox { + float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the position. + float boundOriginOffset[3]; // offset from the position of the avatar to the origin of the bounding box } PACKED_END; - const size_t AVATAR_DIMENSIONS_SIZE = 12; + const size_t AVATAR_BOUNDING_BOX_SIZE = 24; using SixByteQuat = uint8_t[6]; @@ -525,7 +524,7 @@ public: void fromJson(const QJsonObject& json); glm::vec3 getClientGlobalPosition() { return _globalPosition; } - glm::vec3 getGlobalBoundingBoxCorner() { return _globalBoundingBoxCorner; } + glm::vec3 getGlobalBoundingBoxCorner() { return _globalPosition + _globalBoundingBoxOffset - _globalBoundingBoxDimensions; } Q_INVOKABLE AvatarEntityMap getAvatarEntityData() const; Q_INVOKABLE void setAvatarEntityData(const AvatarEntityMap& avatarEntityData); @@ -558,7 +557,7 @@ public slots: protected: void lazyInitHeadData(); - bool avatarDimensionsChangedSince(quint64 time); + bool avatarBoundingBoxChangedSince(quint64 time); bool avatarScaleChangedSince(quint64 time); bool lookAtPositionChangedSince(quint64 time); bool audioLoudnessChangedSince(quint64 time); @@ -634,7 +633,7 @@ protected: quint64 _globalPositionChanged { 0 }; - quint64 _avatarDimensionsChanged { 0 }; + quint64 _avatarBoundingBoxChanged { 0 }; quint64 _avatarScaleChanged { 0 }; quint64 _sensorToWorldMatrixChanged { 0 }; quint64 _additionalFlagsChanged { 0 }; @@ -646,7 +645,7 @@ protected: RateCounter<> _parseBufferRate; RateCounter<> _globalPositionRate; RateCounter<> _localPositionRate; - RateCounter<> _avatarDimensionRate; + RateCounter<> _avatarBoundingBoxRate; RateCounter<> _avatarOrientationRate; RateCounter<> _avatarScaleRate; RateCounter<> _lookAtPositionRate; @@ -657,7 +656,8 @@ protected: RateCounter<> _faceTrackerRate; RateCounter<> _jointDataRate; - glm::vec3 _globalBoundingBoxCorner; + glm::vec3 _globalBoundingBoxDimensions; + glm::vec3 _globalBoundingBoxOffset; mutable ReadWriteLockable _avatarEntitiesLock; AvatarEntityIDs _avatarEntityDetached; // recently detached from this avatar diff --git a/scripts/developer/debugging/debugAvatarMixer.js b/scripts/developer/debugging/debugAvatarMixer.js index 2e7901b962..1a16832e0d 100644 --- a/scripts/developer/debugging/debugAvatarMixer.js +++ b/scripts/developer/debugging/debugAvatarMixer.js @@ -100,7 +100,7 @@ function updateOverlays() { var text = " All: " + AvatarManager.getAvatarDataRate(avatarID).toFixed(2) + "\n" +" GP: " + AvatarManager.getAvatarDataRate(avatarID,"globalPosition").toFixed(2) + "\n" +" LP: " + AvatarManager.getAvatarDataRate(avatarID,"localPosition").toFixed(2) + "\n" - +" AD: " + AvatarManager.getAvatarDataRate(avatarID,"avatarDimensions").toFixed(2) + "\n" + +" BB: " + AvatarManager.getAvatarDataRate(avatarID,"avatarBoundingBox").toFixed(2) + "\n" +" AO: " + AvatarManager.getAvatarDataRate(avatarID,"avatarOrientation").toFixed(2) + "\n" +" AS: " + AvatarManager.getAvatarDataRate(avatarID,"avatarScale").toFixed(2) + "\n" +" LA: " + AvatarManager.getAvatarDataRate(avatarID,"lookAtPosition").toFixed(2) + "\n" From 2e484444222f176676b9ea1b2bda5a2a3e5408e9 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 12 Jan 2017 
19:03:58 -0800 Subject: [PATCH 24/43] hack --- libraries/avatars/src/AvatarData.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 2427198a3e..967deedba8 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -338,7 +338,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasAudioLoudness) { auto data = reinterpret_cast<AvatarDataPacket::AudioLoudness*>(destinationBuffer); - data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness() * (1 / AUDIO_LOUDNESS_SCALE)); + data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness() / AUDIO_LOUDNESS_SCALE); destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); } @@ -771,7 +771,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return buffer.size(); } _headData->setAudioLoudness(audioLoudness); - //qDebug() << "audioLoudness:" << audioLoudness; + qDebug() << "audioLoudness:" << audioLoudness; int numBytesRead = sourceBuffer - startSection; _audioLoudnessRate.increment(numBytesRead); } From ee4df20df3096e0e246cf686276f86288a338bcd Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 12 Jan 2017 19:27:28 -0800 Subject: [PATCH 25/43] duh --- libraries/avatars/src/AvatarData.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 967deedba8..3b5c5e5e8f 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -761,7 +761,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); auto data = reinterpret_cast<const AvatarDataPacket::AudioLoudness*>(sourceBuffer); float audioLoudness; - audioLoudness = unpackFloatGainFromByte(data->audioLoudness * AUDIO_LOUDNESS_SCALE); + audioLoudness = unpackFloatGainFromByte(data->audioLoudness) * AUDIO_LOUDNESS_SCALE; sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); if (isNaN(audioLoudness)) { @@ -771,7 +771,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { return buffer.size(); } _headData->setAudioLoudness(audioLoudness); - qDebug() << "audioLoudness:" << audioLoudness; int numBytesRead = sourceBuffer - startSection; _audioLoudnessRate.increment(numBytesRead); } From 3a9a6e82835e266e1152f43a26b670bb48625801 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Fri, 13 Jan 2017 09:09:29 -0800 Subject: [PATCH 26/43] local position tweaks --- libraries/avatars/src/AvatarData.cpp | 90 ++++++++++++++-------------- libraries/avatars/src/AvatarData.h | 44 +++++--------- 2 files changed, 62 insertions(+), 72 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 3b5c5e5e8f..3f590c2ebd 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -233,39 +233,40 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // // TODO - - // typical -- 1jd 0ft 0p 1af 1stw 0loud 1look 0s 0o 1d 1lp 1gp // - // 2) determine if local position really only matters for parent - 12 bytes - 4.32 kbps (when moving and/or not parented) - // 3) SensorToWorld - should we only send this for avatars with attachments?? 
- 20 bytes - 7.20 kbps - // 5) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps - // - // ----- Subtotal -- non-joint savings --- ~21.2 kbps --- ~12.8% savings? - // - // 5) Joints... use more aggressive quantization and/or culling for more distance between avatars + // 1) Joints... use more aggressive quantization and/or culling for more distance between avatars + // 2) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps + // 3) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps // // Joints -- // 63 rotations * 6 bytes = 136kbps // 3 translations * 6 bytes = 6.48kbps // - bool hasAvatarGlobalPosition = true; // always include global position - bool hasAvatarLocalPosition = sendAll || tranlationChangedSince(lastSentTime); - bool hasAvatarOrientation = sendAll || rotationChangedSince(lastSentTime); + auto localPosition = getLocalPosition(); + auto parentID = getParentID(); + bool hasAvatarGlobalPosition = true; // always include global position + bool hasAvatarOrientation = sendAll || rotationChangedSince(lastSentTime); bool hasAvatarBoundingBox = sendAll || avatarBoundingBoxChangedSince(lastSentTime); bool hasAvatarScale = sendAll || avatarScaleChangedSince(lastSentTime); bool hasLookAtPosition = sendAll || lookAtPositionChangedSince(lastSentTime); bool hasAudioLoudness = sendAll || audioLoudnessChangedSince(lastSentTime); bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChangedSince(lastSentTime); bool hasAdditionalFlags = sendAll || additionalFlagsChangedSince(lastSentTime); + + // local position, and parent info only apply to avatars that are parented. The local position + // and the parent info can change independently though, so we track their "changed since" + // separately bool hasParentInfo = hasParent() && (sendAll || parentInfoChangedSince(lastSentTime)); + bool hasAvatarLocalPosition = hasParent() && (sendAll || tranlationChangedSince(lastSentTime)); + bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); bool hasJointData = sendAll || !sendMinimum; // Leading flags, to indicate how much data is actually included in the packet... AvatarDataPacket::HasFlags packetStateFlags = (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0) - | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0) | (hasAvatarBoundingBox ? AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX : 0) | (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0) | (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0) @@ -274,6 +275,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent | (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0) | (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0) | (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0) + | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0) | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) | (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); @@ -288,19 +290,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); } - // FIXME - I was told by tony this was "skeletal model position"-- but it seems to be - // SpatiallyNestable::getLocalPosition() ... 
which AFAICT is almost always the same as - // the global position (unless presumably you're on a parent)... we might be able to - // include this in the parent info record - if (hasAvatarLocalPosition) { - auto data = reinterpret_cast<AvatarDataPacket::AvatarLocalPosition*>(destinationBuffer); - auto localPosition = getLocalPosition(); - data->localPosition[0] = localPosition.x; - data->localPosition[1] = localPosition.y; - data->localPosition[2] = localPosition.z; - destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - } - if (hasAvatarBoundingBox) { auto data = reinterpret_cast<AvatarDataPacket::AvatarBoundingBox*>(destinationBuffer); @@ -354,8 +343,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); } - QUuid parentID = getParentID(); - if (hasAdditionalFlags) { auto data = reinterpret_cast<AvatarDataPacket::AdditionalFlags*>(destinationBuffer); @@ -385,6 +372,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); } + if (hasAvatarLocalPosition) { + auto data = reinterpret_cast<AvatarDataPacket::AvatarLocalPosition*>(destinationBuffer); + auto localPosition = getLocalPosition(); + data->localPosition[0] = localPosition.x; + data->localPosition[1] = localPosition.y; + data->localPosition[2] = localPosition.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + } + if (hasParentInfo) { auto parentInfo = reinterpret_cast<AvatarDataPacket::ParentInfo*>(destinationBuffer); QByteArray referentialAsBytes = parentID.toRfc4122(); @@ -633,7 +629,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { #define HAS_FLAG(B,F) ((B & F) == F) bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION); - bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION); bool hasAvatarBoundingBox = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX); bool hasAvatarOrientation = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION); bool hasAvatarScale = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_SCALE); @@ -642,6 +637,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { bool hasSensorToWorldMatrix = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX); bool hasAdditionalFlags = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS); bool hasParentInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_PARENT_INFO); + bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION); bool hasFaceTrackerInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO); bool hasJointData = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_JOINT_DATA); @@ -661,24 +657,11 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); int numBytesRead = sourceBuffer - startSection; _globalPositionRate.increment(numBytesRead); - } - if (hasAvatarLocalPosition) { - auto startSection = sourceBuffer; - - PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition)); - auto data = reinterpret_cast<const AvatarDataPacket::AvatarLocalPosition*>(sourceBuffer); - glm::vec3 position = 
glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]); - if (isNaN(position)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID(); - } - return buffer.size(); + // if we don't have a parent, make sure to also set our local position + if (!hasParent()) { + setLocalPosition(newValue); } - setLocalPosition(position); - sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); - int numBytesRead = sourceBuffer - startSection; - _localPositionRate.increment(numBytesRead); } if (hasAvatarBoundingBox) { @@ -863,6 +846,25 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { _parentID = QUuid(); } + if (hasAvatarLocalPosition) { + assert(hasParent()); // we shouldn't have local position unless we have a parent + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition)); + auto data = reinterpret_cast<const AvatarDataPacket::AvatarLocalPosition*>(sourceBuffer); + glm::vec3 position = glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]); + if (isNaN(position)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + setLocalPosition(position); + sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + int numBytesRead = sourceBuffer - startSection; + _localPositionRate.increment(numBytesRead); + } + if (hasFaceTrackerInfo) { auto startSection = sourceBuffer; diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index ce604634b7..433c5441eb 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -126,15 +126,15 @@ namespace AvatarDataPacket { // AvatarGlobalPosition, Avatar Faceshift, eye tracking, and existence of using HasFlags = uint16_t; const HasFlags PACKET_HAS_AVATAR_GLOBAL_POSITION = 1U << 0; - const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 1; // FIXME - can this be in the PARENT_INFO?? 
- const HasFlags PACKET_HAS_AVATAR_BOUNDING_BOX = 1U << 2; - const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 3; - const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 4; - const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 5; - const HasFlags PACKET_HAS_AUDIO_LOUDNESS = 1U << 6; - const HasFlags PACKET_HAS_SENSOR_TO_WORLD_MATRIX = 1U << 7; - const HasFlags PACKET_HAS_ADDITIONAL_FLAGS = 1U << 8; - const HasFlags PACKET_HAS_PARENT_INFO = 1U << 9; + const HasFlags PACKET_HAS_AVATAR_BOUNDING_BOX = 1U << 1; + const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 2; + const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 3; + const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 4; + const HasFlags PACKET_HAS_AUDIO_LOUDNESS = 1U << 5; + const HasFlags PACKET_HAS_SENSOR_TO_WORLD_MATRIX = 1U << 6; + const HasFlags PACKET_HAS_ADDITIONAL_FLAGS = 1U << 7; + const HasFlags PACKET_HAS_PARENT_INFO = 1U << 8; + const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 9; const HasFlags PACKET_HAS_FACE_TRACKER_INFO = 1U << 10; const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11; @@ -142,18 +142,6 @@ namespace AvatarDataPacket { PACKED_BEGIN struct Header { HasFlags packetHasFlags; // state flags, indicated which additional records are included in the packet - // bit 0 - has AvatarGlobalPosition - // bit 1 - has AvatarLocalPosition - // bit 2 - has AvatarBoundingBox - // bit 3 - has AvatarOrientation - // bit 4 - has AvatarScale - // bit 5 - has LookAtPosition - // bit 6 - has AudioLoudness - // bit 7 - has SensorToWorldMatrix - // bit 8 - has AdditionalFlags - // bit 9 - has ParentInfo - // bit 10 - has FaceTrackerInfo - // bit 11 - has JointData } PACKED_END; const size_t HEADER_SIZE = 2; @@ -162,13 +150,6 @@ namespace AvatarDataPacket { } PACKED_END; const size_t AVATAR_GLOBAL_POSITION_SIZE = 12; - PACKED_BEGIN struct AvatarLocalPosition { - float localPosition[3]; // this appears to be the avatar local position?? - // this is a reduced precision radix - // FIXME - could this be changed into compressed floats? - } PACKED_END; - const size_t AVATAR_LOCAL_POSITION_SIZE = 12; - PACKED_BEGIN struct AvatarBoundingBox { float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the position. 
float boundOriginOffset[3]; // offset from the position of the avatar to the origin of the bounding box @@ -231,6 +212,13 @@ namespace AvatarDataPacket { } PACKED_END; const size_t PARENT_INFO_SIZE = 18; + // will only ever be included if the avatar has a parent but can change independent of changes to parent info + // and so we keep it a separate record + PACKED_BEGIN struct AvatarLocalPosition { + float localPosition[3]; // parent frame translation of the avatar + } PACKED_END; + const size_t AVATAR_LOCAL_POSITION_SIZE = 12; + // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags PACKED_BEGIN struct FaceTrackerInfo { float leftEyeBlink; From c9c311e2751fe5aadf46e0793e0a54e4a43f49f4 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Sun, 15 Jan 2017 11:47:48 -0800 Subject: [PATCH 27/43] checkpoint for distance joints --- assignment-client/src/Agent.cpp | 10 ++++- assignment-client/src/avatars/AvatarMixer.cpp | 7 ++- .../src/avatars/AvatarMixerClientData.h | 7 +++ .../src/avatars/ScriptableAvatar.cpp | 7 +++ .../src/avatars/ScriptableAvatar.h | 4 ++ interface/src/avatar/MyAvatar.cpp | 7 +-- interface/src/avatar/MyAvatar.h | 3 +- libraries/avatars/src/AvatarData.cpp | 45 +++++++++++++++---- libraries/avatars/src/AvatarData.h | 9 +++- 9 files changed, 82 insertions(+), 17 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index e79085244f..2764fd4031 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -499,8 +499,14 @@ void Agent::processAgentAvatar() { if (!_scriptEngine->isFinished() && _isAvatar) { auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>(); - QByteArray avatarByteArray = scriptedAvatar->toByteArray((randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) - ? AvatarData::SendAllData : AvatarData::CullSmallData); + AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? 
AvatarData::SendAllData : AvatarData::CullSmallData; + //AvatarData::AvatarDataDetail dataDetail = AvatarData::SendAllData; + quint64 lastSentTime = 0; + QVector<JointData>& lastSentJointData = scriptedAvatar->getLastSentJointData(); + bool distanceAdjust = false; + glm::vec3 viewerPosition(0); + + QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); scriptedAvatar->doneEncoding(true); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 2c8dcb7f8a..59b8429683 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -421,8 +421,11 @@ void AvatarMixer::broadcastAvatarData() { } numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); - quint64 lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); - auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther); + auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); + auto lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); + bool distanceAdjust = true; + glm::vec3 viewerPosition = otherAvatar.getPosition(); + auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition); numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index c9306f73c7..f0b90a57bd 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -113,6 +113,12 @@ public: return result; } + QVector<JointData>& getLastOtherAvatarSentJoints(QUuid otherAvatar) { + return _lastOtherAvatarSentJoints[otherAvatar]; + } + + + private: AvatarSharedPointer _avatar { new AvatarData() }; @@ -123,6 +129,7 @@ private: // this is a map of the last time we encoded an "other" avatar for // sending to "this" node std::unordered_map<QUuid, quint64> _lastOtherAvatarEncodeTime; + std::unordered_map<QUuid, QVector<JointData>> _lastOtherAvatarSentJoints; HRCTime _identityChangeTimestamp; bool _gotIdentity { false }; diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index b4c9a8e89d..989904ca7b 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -14,6 +14,13 @@ #include <GLMHelpers.h> #include "ScriptableAvatar.h" +QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition) { + _globalPosition = getPosition(); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); +} + + // hold and priority unused but kept so that client side JS can run. 
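// ---------------------------------------------------------------------------------------
// [editor's note] Illustrative sketch only -- not part of the patch series. It shows the
// idea behind getLastOtherAvatarSentJoints() above: the mixer keeps one "last sent" joint
// cache per receiving avatar, so small-change culling is judged against what that particular
// viewer last received instead of a single shared _lastSentJointData. The types and names
// below (std::string keys, JointRotationSketch, PerViewerJointCacheSketch) are simplified
// stand-ins, not the real QUuid / QVector / JointData types.

#include <string>
#include <unordered_map>
#include <vector>

struct JointRotationSketch { float x, y, z, w; }; // stand-in for the quaternion in JointData

class PerViewerJointCacheSketch {
public:
    // operator[] default-constructs an empty cache the first time a viewer is seen, which is
    // the same pattern the patch uses on the _lastOtherAvatarSentJoints map
    std::vector<JointRotationSketch>& lastSentJointsFor(const std::string& viewerSessionId) {
        return _lastSentJoints[viewerSessionId];
    }

private:
    std::unordered_map<std::string, std::vector<JointRotationSketch>> _lastSentJoints;
};

// usage sketch: before encoding avatar A for viewer V, fetch V's cache, hand it to the encoder
// as lastSentJointData, and let the encoder overwrite the entries it actually sent to V.
// ---------------------------------------------------------------------------------------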
void ScriptableAvatar::startAnimation(const QString& url, float fps, float priority, bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) { diff --git a/assignment-client/src/avatars/ScriptableAvatar.h b/assignment-client/src/avatars/ScriptableAvatar.h index 18d64f4ac5..da6c1e0b7f 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.h +++ b/assignment-client/src/avatars/ScriptableAvatar.h @@ -27,6 +27,10 @@ public: Q_INVOKABLE void stopAnimation(); Q_INVOKABLE AnimationDetails getAnimationDetails(); virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override; + + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition) override; + private slots: void update(float deltatime); diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 6f5de308ac..2d1dc34b2f 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -226,7 +226,8 @@ void MyAvatar::simulateAttachments(float deltaTime) { // don't update attachments here, do it in harvestResultsFromPhysicsSimulation() } -QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) { +QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition) { CameraMode mode = qApp->getCamera()->getMode(); _globalPosition = getPosition(); _globalBoundingBoxDimensions.x = _characterController.getCapsuleRadius(); @@ -237,12 +238,12 @@ QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTi // fake the avatar position that is sent up to the AvatarMixer glm::vec3 oldPosition = getPosition(); setPosition(getSkeletonPosition()); - QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime); + QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); // copy the correct position back setPosition(oldPosition); return array; } - return AvatarData::toByteArray(dataDetail, lastSentTime); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); } void MyAvatar::centerBody() { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 68e65faad7..1e3b9adf5e 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -333,7 +333,8 @@ private: glm::vec3 getWorldBodyPosition() const; glm::quat getWorldBodyOrientation() const; - QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) override; + QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition) override; void simulate(float deltaTime); void updateFromTrackers(float deltaTime); virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPositio) override; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 3f590c2ebd..3e7b087224 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -193,7 +193,17 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { return true; // FIXME! 
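// ---------------------------------------------------------------------------------------
// [editor's note] Illustrative sketch only -- not part of the patch series. It spells out the
// culling test that the distance-based minRotationDOT stubs introduced just below feed into.
// For unit quaternions, fabs(dot(a, b)) == cos(theta / 2), where theta is the rotation between
// them, so a threshold near 1.0 skips only tiny changes while a smaller threshold lets the
// mixer drop larger changes for distant viewers. The distance bands mirror the
// ROTATION_15D/45D/90D_DOT constants added later in this series; the function names here are
// hypothetical.

#include <cmath>
#include <glm/gtc/quaternion.hpp>

// pick a looser dot threshold the farther the viewer is from the avatar
inline float minRotationDotForDistanceSketch(float distanceMeters) {
    if (distanceMeters < 1.0f)  { return 0.9999999f; } // AVATAR_MIN_ROTATION_DOT
    if (distanceMeters < 5.0f)  { return 0.9914449f; } // ~15 degrees
    if (distanceMeters < 10.0f) { return 0.9238795f; } // ~45 degrees
    return 0.7071068f;                                 // ~90 degrees
}

// send the joint only when it has rotated past the threshold since the value this viewer last
// received (matching the "dot <= minRotationDOT" form used at this point in the series)
inline bool shouldSendJointRotationSketch(const glm::quat& lastSent, const glm::quat& current,
                                          float distanceMeters) {
    return fabsf(glm::dot(current, lastSent)) <= minRotationDotForDistanceSketch(distanceMeters);
}
// ---------------------------------------------------------------------------------------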
} -QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime) { +float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { + return AVATAR_MIN_ROTATION_DOT; // FIXME +} + +float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) { + return AVATAR_MIN_TRANSLATION; // FIXME +} + + +QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition) { // if no timestamp was included, then assume the avatarData is single instance // and is tracking its own last encoding time. @@ -230,6 +240,8 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // BUG -- if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... // this is an iFrame issue... what to do about that? // + // BUG -- Resizing avatar seems to "take too long"... the avatar doesn't redraw at smaller size right away + // BUG -- summoned avatars seem low? // // TODO - @@ -242,8 +254,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // 63 rotations * 6 bytes = 136kbps // 3 translations * 6 bytes = 6.48kbps // + // How we need to handle joints: + // 1) need to track "_lastSentJointData" for each "viewer" so it can't be a member variable of the + // AvatarData. instead it should be like lastSentTime where it's passed in. Store it in the node data + // and in AvatarMixer pass it accordingly + // + // 2) we also want to know the "distance" to the viewer to adjust the relative tolerance for changes and + // whether or not we actually want to do this distance adjust + // - auto localPosition = getLocalPosition(); auto parentID = getParentID(); bool hasAvatarGlobalPosition = true; // always include global position @@ -426,14 +445,16 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent unsigned char* beforeRotations = destinationBuffer; #endif - _lastSentJointData.resize(_jointData.size()); + lastSentJointData.resize(_jointData.size()); + + float minRotationDOT = !distanceAdjust ? AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].rotation != data.rotation) { + if (sendAll || lastSentJointData[i].rotation != data.rotation) { if (sendAll || !cullSmallChanges || - fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) { + fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) <= minRotationDOT) { if (data.rotationSet) { validity |= (1 << validityBit); #if 1 //def WANT_DEBUG @@ -475,13 +496,15 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent unsigned char* beforeTranslations = destinationBuffer; #endif + float minTranslation = !distanceAdjust ? 
AVATAR_MIN_TRANSLATION : getDistanceBasedMinTranslationDistance(viewerPosition); + float maxTranslationDimension = 0.0; for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].translation != data.translation) { + if (sendAll || lastSentJointData[i].translation != data.translation) { if (sendAll || !cullSmallChanges || - glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) { + glm::distance(data.translation, lastSentJointData[i].translation) > minTranslation) { if (data.translationSet) { validity |= (1 << validityBit); #if 1 //def WANT_DEBUG @@ -1508,7 +1531,13 @@ void AvatarData::sendAvatarDataPacket() { // about 2% of the time, we send a full update (meaning, we transmit all the joint data), even if nothing has changed. // this is to guard against a joint moving once, the packet getting lost, and the joint never moving again. - QByteArray avatarByteArray = toByteArray((randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? SendAllData : CullSmallData); + + auto dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? SendAllData : CullSmallData; + quint64 lastSentTime = 0; + QVector<JointData>& lastSentJointData = _lastSentJointData; + bool distanceAdjust = false; + glm::vec3 viewerPosition(0); + QByteArray avatarByteArray = toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); doneEncoding(true); // FIXME - doneEncoding() takes a bool for culling small changes, that's janky! diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 433c5441eb..5652ccd705 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -347,7 +347,9 @@ public: SendAllData } AvatarDataDetail; - virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime = 0); + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition); + virtual void doneEncoding(bool cullSmallChanges); /// \return true if an error should be logged @@ -526,6 +528,8 @@ public: float getDataRate(const QString& rateName = QString("")); + QVector<JointData>& getLastSentJointData() { return _lastSentJointData; } + public slots: void sendAvatarDataPacket(); void sendIdentityPacket(); @@ -545,6 +549,9 @@ public slots: protected: void lazyInitHeadData(); + float getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition); + float getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition); + bool avatarBoundingBoxChangedSince(quint64 time); bool avatarScaleChangedSince(quint64 time); bool lookAtPositionChangedSince(quint64 time); From 6e5e9d3492e02b7c4b685d859c3bb622f299ff97 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 16 Jan 2017 18:28:01 -0800 Subject: [PATCH 28/43] more work --- assignment-client/src/Agent.cpp | 4 +-- assignment-client/src/avatars/AvatarMixer.cpp | 4 +-- .../src/avatars/ScriptableAvatar.cpp | 9 +++++++ libraries/avatars/src/AvatarData.cpp | 26 ++++++++++++++++--- libraries/avatars/src/AvatarData.h | 3 +++ 5 files changed, 39 insertions(+), 7 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 2764fd4031..32cecd3801 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -499,8 +499,8 @@ void Agent::processAgentAvatar() { if (!_scriptEngine->isFinished() && _isAvatar) { auto scriptedAvatar = 
DependencyManager::get<ScriptableAvatar>(); - AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData; - //AvatarData::AvatarDataDetail dataDetail = AvatarData::SendAllData; + //AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData; + AvatarData::AvatarDataDetail dataDetail = AvatarData::SendAllData; quint64 lastSentTime = 0; QVector<JointData>& lastSentJointData = scriptedAvatar->getLastSentJointData(); bool distanceAdjust = false; diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 59b8429683..ab9d024556 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -416,7 +416,7 @@ void AvatarMixer::broadcastAvatarData() { nodeData->incrementAvatarOutOfView(); } else { detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO - ? AvatarData::SendAllData : AvatarData::IncludeSmallData; + ? AvatarData::SendAllData : AvatarData::CullSmallData; nodeData->incrementAvatarInView(); } @@ -424,7 +424,7 @@ void AvatarMixer::broadcastAvatarData() { auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); auto lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); bool distanceAdjust = true; - glm::vec3 viewerPosition = otherAvatar.getPosition(); + glm::vec3 viewerPosition = nodeData->getPosition(); auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition); numAvatarDataBytes += avatarPacketList->write(bytes); diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index 989904ca7b..d4d5b470b8 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -57,18 +57,26 @@ void ScriptableAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) { _bind.reset(); _animSkeleton.reset(); AvatarData::setSkeletonModelURL(skeletonModelURL); + //qDebug() << "skeletonModelURL:" << skeletonModelURL; + //qDebug() << "_skeletonFBXURL:" << _skeletonFBXURL; } void ScriptableAvatar::update(float deltatime) { + //qDebug() << __FUNCTION__ << "delta:" << deltatime; if (_bind.isNull() && !_skeletonFBXURL.isEmpty()) { // AvatarData will parse the .fst, but not get the .fbx skeleton. 
_bind = DependencyManager::get<AnimationCache>()->getAnimation(_skeletonFBXURL); + //qDebug() << "_skeletonFBXURL:" << _skeletonFBXURL; } + //qDebug() << "bind:" << _bind << "isLoaded:" << (!_bind.isNull() && _bind->isLoaded()); + //qDebug() << "_animation:" << _animation << "isLoaded:" << (_animation && _animation->isLoaded()); + // Run animation if (_animation && _animation->isLoaded() && _animation->getFrames().size() > 0 && !_bind.isNull() && _bind->isLoaded()) { if (!_animSkeleton) { _animSkeleton = std::make_shared<AnimSkeleton>(_bind->getGeometry()); } float currentFrame = _animationDetails.currentFrame + deltatime * _animationDetails.fps; + //qDebug() << "currentFrame:" << currentFrame; if (_animationDetails.loop || currentFrame < _animationDetails.lastFrame) { while (currentFrame >= _animationDetails.lastFrame) { currentFrame -= (_animationDetails.lastFrame - _animationDetails.firstFrame); @@ -107,6 +115,7 @@ void ScriptableAvatar::update(float deltatime) { if (data.rotation != pose.rot()) { data.rotation = pose.rot(); data.rotationSet = true; + //qDebug() << "joint[" << i << "].rotation:" << data.rotation; } } diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 3e7b087224..e7262bb38c 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -194,7 +194,18 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { } float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { - return AVATAR_MIN_ROTATION_DOT; // FIXME + auto distance = glm::distance(_globalPosition, viewerPosition); + //qDebug() << "_globalPosition:" << _globalPosition << "viewerPosition:" << viewerPosition << "distance:" << distance; + float result = ROTATION_90D_DOT; // assume worst + if (distance < 1.0f) { + result = AVATAR_MIN_ROTATION_DOT; + } else if (distance < 5.0f) { + result = ROTATION_15D_DOT; + } else if (distance < 10.0f) { + result = ROTATION_45D_DOT; + } + //qDebug() << __FUNCTION__ << "result:" << result; + return result; } float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) { @@ -241,7 +252,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // this is an iFrame issue... what to do about that? // // BUG -- Resizing avatar seems to "take too long"... the avatar doesn't redraw at smaller size right away - // BUG -- summoned avatars seem low? // // TODO - @@ -448,13 +458,17 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent lastSentJointData.resize(_jointData.size()); float minRotationDOT = !distanceAdjust ? 
AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); + //qDebug() << "sendAll:" << sendAll << "cullSmallChanges:" << cullSmallChanges; for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; + //qDebug() << "joint[" << i << "].dot:" << fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)); + + if (sendAll || lastSentJointData[i].rotation != data.rotation) { if (sendAll || !cullSmallChanges || - fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) <= minRotationDOT) { + fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) > minRotationDOT) { if (data.rotationSet) { validity |= (1 << validityBit); #if 1 //def WANT_DEBUG @@ -1484,6 +1498,9 @@ void AvatarData::detachAll(const QString& modelURL, const QString& jointName) { } void AvatarData::setJointMappingsFromNetworkReply() { + + //qDebug() << __FUNCTION__ << "_skeletonModelURL:" << _skeletonModelURL; + QNetworkReply* networkReply = static_cast<QNetworkReply*>(sender()); { @@ -1577,6 +1594,9 @@ void AvatarData::updateJointMappings() { } if (_skeletonModelURL.fileName().toLower().endsWith(".fst")) { + + //qDebug() << __FUNCTION__ << "_skeletonModelURL:" << _skeletonModelURL; + QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance(); QNetworkRequest networkRequest = QNetworkRequest(_skeletonModelURL); networkRequest.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 5652ccd705..cfac697df3 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -258,6 +258,9 @@ const float AVATAR_SEND_FULL_UPDATE_RATIO = 0.02f; const float AVATAR_MIN_ROTATION_DOT = 0.9999999f; const float AVATAR_MIN_TRANSLATION = 0.0001f; +const float ROTATION_15D_DOT = 0.9914449f; +const float ROTATION_45D_DOT = 0.9238795f; +const float ROTATION_90D_DOT = 0.7071068f; // Where one's own Avatar begins in the world (will be overwritten if avatar data file is found). // This is the start location in the Sandbox (xyz: 6270, 211, 6000). From 060c63045b7b7e8574db2411d6e25e16b192d3b5 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 16 Jan 2017 18:28:27 -0800 Subject: [PATCH 29/43] a test script --- script-archive/acScripts/simpleBot.js | 81 +++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 script-archive/acScripts/simpleBot.js diff --git a/script-archive/acScripts/simpleBot.js b/script-archive/acScripts/simpleBot.js new file mode 100644 index 0000000000..914f9e560d --- /dev/null +++ b/script-archive/acScripts/simpleBot.js @@ -0,0 +1,81 @@ +// +// bot_randomExpression.js +// examples +// +// Created by Ben Arnold on 7/23/14. +// Copyright 2014 High Fidelity, Inc. +// +// This is an example script that demonstrates an NPC avatar with +// random facial expressions. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +HIFI_PUBLIC_BUCKET = "http://s3.amazonaws.com/hifi-public/"; + +function getRandomFloat(min, max) { + return Math.random() * (max - min) + min; +} + +function getRandomInt (min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function printVector(string, vector) { + print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); +} + +var timePassed = 0.0; +var updateSpeed = 3.0; + +var X_MIN = 5.0; +var X_MAX = 15.0; +var Z_MIN = 5.0; +var Z_MAX = 15.0; +var Y_PELVIS = 1.0; + +Agent.isAvatar = true; + +// change the avatar's position to the random one +Avatar.position = {x:0,y:0,z:0}; // { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };; +printVector("New bot, position = ", Avatar.position); + +var animationData = {url: "file:///D:/Development/HiFi/hifi/interface/resources/avatar/animations/walk_fwd.fbx", lastFrame: 35}; +//Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); +//Avatar.skeletonModelURL = "file:///D:/Development/HiFi/hifi/interface/resources/meshes/being_of_light/being_of_light.fbx"; + +var millisecondsToWaitBeforeStarting = 10 * 1000; +Script.setTimeout(function () { + print("Starting at", JSON.stringify(Avatar.position)); + Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); +}, millisecondsToWaitBeforeStarting); + + + +function update(deltaTime) { + timePassed += deltaTime; + if (timePassed > updateSpeed) { + timePassed = 0; + var newPosition = Vec3.sum(Avatar.position, { x: getRandomFloat(-0.1, 0.1), y: 0, z: getRandomFloat(-0.1, 0.1) }); + Avatar.position = newPosition; + Vec3.print("new:", newPosition); + + /* + var q0 = Quat.fromPitchYawRollDegrees(0, 0, 0); // degrees + var q15 = Quat.fromPitchYawRollDegrees(0, 15, 0); // degrees + var q45 = Quat.fromPitchYawRollDegrees(0, 45, 0); // degrees + var q90 = Quat.fromPitchYawRollDegrees(0, 90, 0); // degrees + print("dot 15 deg:" + Quat.dot(q0,q15)); + print("dot 45 deg:" + Quat.dot(q0,q45)); + print("dot 95 deg:" + Quat.dot(q0,q90)); + + var q45r = Quat.fromPitchYawRollDegrees(0, 0, 45); // degrees + var q90r = Quat.fromPitchYawRollDegrees(0, 0, 90); // degrees + print("dot 45 deg roll:" + Quat.dot(q0,q45r)); + print("dot 95 deg roll:" + Quat.dot(q0,q90r)); + */ + } +} + +Script.update.connect(update); \ No newline at end of file From 292d472a59aaf3094dc2232a03ba6bdba16ee320 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 13:06:29 -0800 Subject: [PATCH 30/43] checkpoint - got first cut at distance based rotation tolerance working --- assignment-client/src/Agent.cpp | 1 + assignment-client/src/avatars/AvatarMixer.cpp | 4 +- .../src/avatars/ScriptableAvatar.cpp | 4 +- .../src/avatars/ScriptableAvatar.h | 4 +- interface/src/avatar/MyAvatar.cpp | 8 +-- interface/src/avatar/MyAvatar.h | 7 ++- libraries/avatars/src/AvatarData.cpp | 59 +++++++++++++++---- libraries/avatars/src/AvatarData.h | 13 ++-- 8 files changed, 71 insertions(+), 29 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 32cecd3801..ad29cee75c 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -507,6 +507,7 @@ void Agent::processAgentAvatar() { glm::vec3 viewerPosition(0); QByteArray avatarByteArray = 
scriptedAvatar->toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); + scriptedAvatar->doneEncoding(true); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index ab9d024556..9dd9f76adf 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -422,10 +422,10 @@ void AvatarMixer::broadcastAvatarData() { numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); - auto lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); + QVector<JointData>& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); bool distanceAdjust = true; glm::vec3 viewerPosition = nodeData->getPosition(); - auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition); + auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition, &lastSentJointsForOther); numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index d4d5b470b8..eccc140f32 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -14,8 +14,8 @@ #include <GLMHelpers.h> #include "ScriptableAvatar.h" -QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition) { +QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { _globalPosition = getPosition(); return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); } diff --git a/assignment-client/src/avatars/ScriptableAvatar.h b/assignment-client/src/avatars/ScriptableAvatar.h index da6c1e0b7f..59f47a9ebb 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.h +++ b/assignment-client/src/avatars/ScriptableAvatar.h @@ -28,8 +28,8 @@ public: Q_INVOKABLE AnimationDetails getAnimationDetails(); virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override; - virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition) override; + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr) override; private slots: diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index 2d1dc34b2f..acc920dc86 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -226,8 +226,8 @@ void MyAvatar::simulateAttachments(float deltaTime) { // don't update attachments here, do it in harvestResultsFromPhysicsSimulation() } -QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool 
distanceAdjust, glm::vec3 viewerPosition) { +QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { CameraMode mode = qApp->getCamera()->getMode(); _globalPosition = getPosition(); _globalBoundingBoxDimensions.x = _characterController.getCapsuleRadius(); @@ -238,12 +238,12 @@ QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTi // fake the avatar position that is sent up to the AvatarMixer glm::vec3 oldPosition = getPosition(); setPosition(getSkeletonPosition()); - QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); + QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); // copy the correct position back setPosition(oldPosition); return array; } - return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); } void MyAvatar::centerBody() { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 1e3b9adf5e..bb591afd51 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -333,8 +333,11 @@ private: glm::vec3 getWorldBodyPosition() const; glm::quat getWorldBodyOrientation() const; - QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition) override; + + + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr) override; + void simulate(float deltaTime); void updateFromTrackers(float deltaTime); virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPositio) override; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index e7262bb38c..c31fdfbdb6 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -196,15 +196,18 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { auto distance = glm::distance(_globalPosition, viewerPosition); //qDebug() << "_globalPosition:" << _globalPosition << "viewerPosition:" << viewerPosition << "distance:" << distance; - float result = ROTATION_90D_DOT; // assume worst + float result = ROTATION_179D_DOT; // assume worst if (distance < 1.0f) { result = AVATAR_MIN_ROTATION_DOT; } else if (distance < 5.0f) { result = ROTATION_15D_DOT; } else if (distance < 10.0f) { result = ROTATION_45D_DOT; + } else if (distance < 20.0f) { + result = ROTATION_90D_DOT; } //qDebug() << __FUNCTION__ << "result:" << result; + return result; } @@ -213,8 +216,8 @@ float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPositio } -QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition) { +QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, 
glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { // if no timestamp was included, then assume the avatarData is single instance // and is tracking its own last encoding time. @@ -455,25 +458,43 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent unsigned char* beforeRotations = destinationBuffer; #endif - lastSentJointData.resize(_jointData.size()); - + if (sentJointDataOut) { + if (sentJointDataOut->size() != _jointData.size()) { + sentJointDataOut->resize(_jointData.size()); + } + } float minRotationDOT = !distanceAdjust ? AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); - //qDebug() << "sendAll:" << sendAll << "cullSmallChanges:" << cullSmallChanges; + auto distance = glm::distance(_globalPosition, viewerPosition); + //qDebug() << "sendAll:" << sendAll << "cullSmallChanges:" << cullSmallChanges << "minRotationDOT:" << minRotationDOT << "distance:" << distance; for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - //qDebug() << "joint[" << i << "].dot:" << fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)); + // The dot product for smaller rotations is a smaller number. + // + // const float AVATAR_MIN_ROTATION_DOT = 0.9999999f; + // const float ROTATION_15D_DOT = 0.9914449f; + // const float ROTATION_45D_DOT = 0.9238795f; + // const float ROTATION_90D_DOT = 0.7071068f; + // So if the dot() is less than the value, then the rotation is a larger angle of rotation + // + bool largeEnoughRotation = fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) < minRotationDOT; + + //qDebug() << "joint[" << i << "].dot:" << fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) << "minRotationDOT:" << minRotationDOT << "largeEnoughRotation:" << largeEnoughRotation; + if (sendAll || lastSentJointData[i].rotation != data.rotation) { - if (sendAll || - !cullSmallChanges || - fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) > minRotationDOT) { + if (sendAll || !cullSmallChanges || largeEnoughRotation) { if (data.rotationSet) { validity |= (1 << validityBit); #if 1 //def WANT_DEBUG rotationSentCount++; #endif + if (sentJointDataOut) { + auto jointDataOut = *sentJointDataOut; + jointDataOut[i].rotation = data.rotation; + } + } } } @@ -527,6 +548,12 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension); + + if (sentJointDataOut) { + auto jointDataOut = *sentJointDataOut; + jointDataOut[i].translation = data.translation; + } + } } } @@ -585,6 +612,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent return avatarDataByteArray.left(avatarDataSize); } +// NOTE: This is never used in a "distanceAdjust" mode, so it's ok that it doesn't use a variable minimum rotation/translation void AvatarData::doneEncoding(bool cullSmallChanges) { // The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData. QReadLocker readLock(&_jointDataLock); @@ -1549,14 +1577,19 @@ void AvatarData::sendAvatarDataPacket() { // about 2% of the time, we send a full update (meaning, we transmit all the joint data), even if nothing has changed. 
// this is to guard against a joint moving once, the packet getting lost, and the joint never moving again. - auto dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? SendAllData : CullSmallData; + bool cullSmallData = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO); + auto dataDetail = cullSmallData ? SendAllData : CullSmallData; quint64 lastSentTime = 0; - QVector<JointData>& lastSentJointData = _lastSentJointData; + QVector<JointData> lastSentJointData; + { + QReadLocker readLock(&_jointDataLock); + lastSentJointData = _lastSentJointData; + } bool distanceAdjust = false; glm::vec3 viewerPosition(0); QByteArray avatarByteArray = toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); - doneEncoding(true); // FIXME - doneEncoding() takes a bool for culling small changes, that's janky! + doneEncoding(cullSmallData); // FIXME - doneEncoding() takes a bool for culling small changes, that's janky! static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index cfac697df3..bd0591973a 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -260,7 +260,8 @@ const float AVATAR_MIN_TRANSLATION = 0.0001f; const float ROTATION_15D_DOT = 0.9914449f; const float ROTATION_45D_DOT = 0.9238795f; -const float ROTATION_90D_DOT = 0.7071068f; +const float ROTATION_90D_DOT = 0.7071068f; +const float ROTATION_179D_DOT = 0.0087266f; // Where one's own Avatar begins in the world (will be overwritten if avatar data file is found). // This is the start location in the Sandbox (xyz: 6270, 211, 6000). @@ -350,8 +351,8 @@ public: SendAllData } AvatarDataDetail; - virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition); + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr); virtual void doneEncoding(bool cullSmallChanges); @@ -531,7 +532,11 @@ public: float getDataRate(const QString& rateName = QString("")); - QVector<JointData>& getLastSentJointData() { return _lastSentJointData; } + QVector<JointData> getLastSentJointData() { + QReadLocker readLock(&_jointDataLock); + return _lastSentJointData; + } + public slots: void sendAvatarDataPacket(); From e0bad95257b782410f4d60e7fbbe32a07bb73c94 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 13:18:55 -0800 Subject: [PATCH 31/43] some cleanup --- .../src/avatars/ScriptableAvatar.cpp | 11 +---- libraries/avatars/src/AvatarData.cpp | 44 +++++-------------- 2 files changed, 12 insertions(+), 43 deletions(-) diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index eccc140f32..199f5b31d1 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -17,7 +17,7 @@ QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { _globalPosition = getPosition(); - return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); + return AvatarData::toByteArray(dataDetail, lastSentTime, 
lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); } @@ -57,26 +57,18 @@ void ScriptableAvatar::setSkeletonModelURL(const QUrl& skeletonModelURL) { _bind.reset(); _animSkeleton.reset(); AvatarData::setSkeletonModelURL(skeletonModelURL); - //qDebug() << "skeletonModelURL:" << skeletonModelURL; - //qDebug() << "_skeletonFBXURL:" << _skeletonFBXURL; } void ScriptableAvatar::update(float deltatime) { - //qDebug() << __FUNCTION__ << "delta:" << deltatime; if (_bind.isNull() && !_skeletonFBXURL.isEmpty()) { // AvatarData will parse the .fst, but not get the .fbx skeleton. _bind = DependencyManager::get<AnimationCache>()->getAnimation(_skeletonFBXURL); - //qDebug() << "_skeletonFBXURL:" << _skeletonFBXURL; } - //qDebug() << "bind:" << _bind << "isLoaded:" << (!_bind.isNull() && _bind->isLoaded()); - //qDebug() << "_animation:" << _animation << "isLoaded:" << (_animation && _animation->isLoaded()); - // Run animation if (_animation && _animation->isLoaded() && _animation->getFrames().size() > 0 && !_bind.isNull() && _bind->isLoaded()) { if (!_animSkeleton) { _animSkeleton = std::make_shared<AnimSkeleton>(_bind->getGeometry()); } float currentFrame = _animationDetails.currentFrame + deltatime * _animationDetails.fps; - //qDebug() << "currentFrame:" << currentFrame; if (_animationDetails.loop || currentFrame < _animationDetails.lastFrame) { while (currentFrame >= _animationDetails.lastFrame) { currentFrame -= (_animationDetails.lastFrame - _animationDetails.firstFrame); @@ -115,7 +107,6 @@ void ScriptableAvatar::update(float deltatime) { if (data.rotation != pose.rot()) { data.rotation = pose.rot(); data.rotationSet = true; - //qDebug() << "joint[" << i << "].rotation:" << data.rotation; } } diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 5c203b584a..1d511139e6 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -195,7 +195,6 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { auto distance = glm::distance(_globalPosition, viewerPosition); - //qDebug() << "_globalPosition:" << _globalPosition << "viewerPosition:" << viewerPosition << "distance:" << distance; float result = ROTATION_179D_DOT; // assume worst if (distance < 1.0f) { result = AVATAR_MIN_ROTATION_DOT; @@ -206,8 +205,6 @@ float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { } else if (distance < 20.0f) { result = ROTATION_90D_DOT; } - //qDebug() << __FUNCTION__ << "result:" << result; - return result; } @@ -465,29 +462,19 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent } float minRotationDOT = !distanceAdjust ? AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); auto distance = glm::distance(_globalPosition, viewerPosition); - //qDebug() << "sendAll:" << sendAll << "cullSmallChanges:" << cullSmallChanges << "minRotationDOT:" << minRotationDOT << "distance:" << distance; for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - // The dot product for smaller rotations is a smaller number. 
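        // (Aside, for reference: these thresholds come from the unit-quaternion identity
        //  |dot(q1, q2)| = cos(theta / 2), where theta is the angle between the two
        //  orientations. A minimal sketch of how the constants could be derived, assuming
        //  glm is available -- the helper name is illustrative and not part of this patch:
        //
        //      static float dotForRotationOfDegrees(float degrees) {
        //          return cosf(glm::radians(degrees) * 0.5f);   // cos(theta / 2)
        //      }
        //      // dotForRotationOfDegrees(15.0f)  ~= 0.9914449f  -> ROTATION_15D_DOT
        //      // dotForRotationOfDegrees(45.0f)  ~= 0.9238795f  -> ROTATION_45D_DOT
        //      // dotForRotationOfDegrees(90.0f)  ~= 0.7071068f  -> ROTATION_90D_DOT
        //      // dotForRotationOfDegrees(179.0f) ~= 0.0087266f  -> ROTATION_179D_DOT
        //
        //  The codebase keeps these as literal constants rather than computing them.)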
- // - // const float AVATAR_MIN_ROTATION_DOT = 0.9999999f; - // const float ROTATION_15D_DOT = 0.9914449f; - // const float ROTATION_45D_DOT = 0.9238795f; - // const float ROTATION_90D_DOT = 0.7071068f; // So if the dot() is less than the value, then the rotation is a larger angle of rotation - // bool largeEnoughRotation = fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) < minRotationDOT; - //qDebug() << "joint[" << i << "].dot:" << fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) << "minRotationDOT:" << minRotationDOT << "largeEnoughRotation:" << largeEnoughRotation; - if (sendAll || lastSentJointData[i].rotation != data.rotation) { if (sendAll || !cullSmallChanges || largeEnoughRotation) { if (data.rotationSet) { validity |= (1 << validityBit); -#if 1 //def WANT_DEBUG +#ifdef WANT_DEBUG rotationSentCount++; #endif if (sentJointDataOut) { @@ -542,9 +529,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent glm::distance(data.translation, lastSentJointData[i].translation) > minTranslation) { if (data.translationSet) { validity |= (1 << validityBit); -#if 1 //def WANT_DEBUG + #ifdef WANT_DEBUG translationSentCount++; -#endif + #endif maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension); @@ -591,19 +578,16 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(), TRANSLATION_COMPRESSION_RADIX); - //qDebug() << "hasJointData rotationSentCount:" << rotationSentCount << "translationSentCount:" << translationSentCount; - - #ifdef WANT_DEBUG if (sendAll) { qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll - << "rotations:" << rotationSentCount << "translations:" << translationSentCount - << "largest:" << maxTranslationDimension - << "size:" - << (beforeRotations - startPosition) << "+" - << (beforeTranslations - beforeRotations) << "+" - << (destinationBuffer - beforeTranslations) << "=" - << (destinationBuffer - startPosition); + << "rotations:" << rotationSentCount << "translations:" << translationSentCount + << "largest:" << maxTranslationDimension + << "size:" + << (beforeRotations - startPosition) << "+" + << (beforeTranslations - beforeRotations) << "+" + << (destinationBuffer - beforeTranslations) << "=" + << (destinationBuffer - startPosition); } #endif } @@ -1029,7 +1013,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { } } -#if 0 //def WANT_DEBUG +#ifdef WANT_DEBUG if (numValidJointRotations > 15) { qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations << "translations:" << numValidJointTranslations @@ -1040,7 +1024,6 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); - //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations; int numBytesRead = sourceBuffer - startSection; _jointDataRate.increment(numBytesRead); } @@ -1530,8 +1513,6 @@ void AvatarData::detachAll(const QString& modelURL, const QString& jointName) { void 
AvatarData::setJointMappingsFromNetworkReply() { - //qDebug() << __FUNCTION__ << "_skeletonModelURL:" << _skeletonModelURL; - QNetworkReply* networkReply = static_cast<QNetworkReply*>(sender()); { @@ -1630,9 +1611,6 @@ void AvatarData::updateJointMappings() { } if (_skeletonModelURL.fileName().toLower().endsWith(".fst")) { - - //qDebug() << __FUNCTION__ << "_skeletonModelURL:" << _skeletonModelURL; - QNetworkAccessManager& networkAccessManager = NetworkAccessManager::getInstance(); QNetworkRequest networkRequest = QNetworkRequest(_skeletonModelURL); networkRequest.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true); From 8ffd9412f378e4bc14a0f5d720c3c83290140c9c Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 13:23:05 -0800 Subject: [PATCH 32/43] cleanup simpleBot --- script-archive/acScripts/simpleBot.js | 28 +++++---------------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/script-archive/acScripts/simpleBot.js b/script-archive/acScripts/simpleBot.js index 914f9e560d..a79e44484b 100644 --- a/script-archive/acScripts/simpleBot.js +++ b/script-archive/acScripts/simpleBot.js @@ -1,12 +1,9 @@ // -// bot_randomExpression.js +// simpleBot.js // examples // -// Created by Ben Arnold on 7/23/14. -// Copyright 2014 High Fidelity, Inc. -// -// This is an example script that demonstrates an NPC avatar with -// random facial expressions. +// Created by Brad Hefta-Gaub on 12/23/16. +// Copyright 2016 High Fidelity, Inc. // // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -38,14 +35,14 @@ var Y_PELVIS = 1.0; Agent.isAvatar = true; // change the avatar's position to the random one -Avatar.position = {x:0,y:0,z:0}; // { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };; +Avatar.position = {x:0,y:1.1,z:0}; // { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };; printVector("New bot, position = ", Avatar.position); var animationData = {url: "file:///D:/Development/HiFi/hifi/interface/resources/avatar/animations/walk_fwd.fbx", lastFrame: 35}; //Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); //Avatar.skeletonModelURL = "file:///D:/Development/HiFi/hifi/interface/resources/meshes/being_of_light/being_of_light.fbx"; -var millisecondsToWaitBeforeStarting = 10 * 1000; +var millisecondsToWaitBeforeStarting = 4 * 1000; Script.setTimeout(function () { print("Starting at", JSON.stringify(Avatar.position)); Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); @@ -60,21 +57,6 @@ function update(deltaTime) { var newPosition = Vec3.sum(Avatar.position, { x: getRandomFloat(-0.1, 0.1), y: 0, z: getRandomFloat(-0.1, 0.1) }); Avatar.position = newPosition; Vec3.print("new:", newPosition); - - /* - var q0 = Quat.fromPitchYawRollDegrees(0, 0, 0); // degrees - var q15 = Quat.fromPitchYawRollDegrees(0, 15, 0); // degrees - var q45 = Quat.fromPitchYawRollDegrees(0, 45, 0); // degrees - var q90 = Quat.fromPitchYawRollDegrees(0, 90, 0); // degrees - print("dot 15 deg:" + Quat.dot(q0,q15)); - print("dot 45 deg:" + Quat.dot(q0,q45)); - print("dot 95 deg:" + Quat.dot(q0,q90)); - - var q45r = Quat.fromPitchYawRollDegrees(0, 0, 45); // degrees - var q90r = Quat.fromPitchYawRollDegrees(0, 0, 90); // degrees - print("dot 45 deg roll:" + 
Quat.dot(q0,q45r)); - print("dot 95 deg roll:" + Quat.dot(q0,q90r)); - */ } } From 7ba41c72ef375b3421f0e4f072cabacc3779abfd Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 13:31:06 -0800 Subject: [PATCH 33/43] cleanup debugAvatarMixer --- .../developer/debugging/debugAvatarMixer.js | 40 +------------------ 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/scripts/developer/debugging/debugAvatarMixer.js b/scripts/developer/debugging/debugAvatarMixer.js index 1a16832e0d..6c0a935b70 100644 --- a/scripts/developer/debugging/debugAvatarMixer.js +++ b/scripts/developer/debugging/debugAvatarMixer.js @@ -17,28 +17,7 @@ Script.include("/~/system/libraries/controllers.js"); -// grab the toolbar -var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system"); - -var ASSETS_PATH = Script.resolvePath("assets"); -var TOOLS_PATH = Script.resolvePath("assets/images/tools/"); - -function buttonImageURL() { - return TOOLS_PATH + (Users.canKick ? 'kick.svg' : 'ignore.svg'); -} - -// setup the mod button and add it to the toolbar -var button = toolbar.addButton({ - objectName: 'debugAvatarMixer', - imageURL: buttonImageURL(), - visible: true, - buttonState: 1, - defaultState: 1, - hoverState: 3, - alpha: 0.9 -}); - -var isShowingOverlays = false; +var isShowingOverlays = true; var debugOverlays = {}; function removeOverlays() { @@ -55,22 +34,6 @@ function removeOverlays() { debugOverlays = {}; } -// handle clicks on the toolbar button -function buttonClicked(){ - if (isShowingOverlays) { - removeOverlays(); - isShowingOverlays = false; - } else { - isShowingOverlays = true; - } - - button.writeProperty('buttonState', isShowingOverlays ? 0 : 1); - button.writeProperty('defaultState', isShowingOverlays ? 0 : 1); - button.writeProperty('hoverState', isShowingOverlays ? 
2 : 3); -} - -button.clicked.connect(buttonClicked); - function updateOverlays() { if (isShowingOverlays) { @@ -161,7 +124,6 @@ AvatarList.avatarRemovedEvent.connect(function(avatarID){ // cleanup the toolbar button and overlays when script is stopped Script.scriptEnding.connect(function() { - toolbar.removeButton('debugAvatarMixer'); removeOverlays(); }); From 5f9f051c3ce84608bf16ffb194317112b582d45e Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 15:01:50 -0800 Subject: [PATCH 34/43] more tweaks --- libraries/avatars/src/AvatarData.cpp | 49 ++++++++-------------------- libraries/avatars/src/AvatarData.h | 6 ++++ 2 files changed, 20 insertions(+), 35 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 1d511139e6..9a7090049b 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -196,20 +196,20 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { auto distance = glm::distance(_globalPosition, viewerPosition); float result = ROTATION_179D_DOT; // assume worst - if (distance < 1.0f) { + if (distance < AVATAR_DISTANCE_LEVEL_1) { result = AVATAR_MIN_ROTATION_DOT; - } else if (distance < 5.0f) { + } else if (distance < AVATAR_DISTANCE_LEVEL_2) { result = ROTATION_15D_DOT; - } else if (distance < 10.0f) { + } else if (distance < AVATAR_DISTANCE_LEVEL_3) { result = ROTATION_45D_DOT; - } else if (distance < 20.0f) { + } else if (distance < AVATAR_DISTANCE_LEVEL_4) { result = ROTATION_90D_DOT; } return result; } float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) { - return AVATAR_MIN_TRANSLATION; // FIXME + return AVATAR_MIN_TRANSLATION; // Eventually make this distance sensitive as well } @@ -233,19 +233,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data()); unsigned char* startPosition = destinationBuffer; - // FIXME - things to consider - // - // - how to dry up this code? - // - // - the sections below are basically little repeats of each other, where they - // cast the destination pointer to the section struct type, set the struct - // members in some specific way (not just assigning), then advance the buffer, - // and then remember the last value sent. This could be macro-ized and/or - // templatized or lambda-ized - // - // - also, we could determine the "hasXXX" flags in the little sections, - // and then set the actual flag values AFTER the rest are done... - // // FIXME - // // BUG -- if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... @@ -253,24 +240,16 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent // // BUG -- Resizing avatar seems to "take too long"... the avatar doesn't redraw at smaller size right away // - - // TODO - + // TODO consider these additional optimizations in the future + // 1) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps + // 2) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps + // 3) Improve Joints -- currently we use rotational tolerances, but if we had skeleton/bone length data + // we could do a better job of determining if the change in joints actually translates to visible + // changes at distance. // - // 1) Joints... 
use more aggressive quantization and/or culling for more distance between avatars - // 2) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps - // 3) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps - // - // Joints -- - // 63 rotations * 6 bytes = 136kbps - // 3 translations * 6 bytes = 6.48kbps - // - // How we need to handle joints: - // 1) need to track "_lastSentJointData" for each "viewer" so it can't be a member variable of the - // AvatarData. instead it should be like lastSentTime where it's passed in. Store it in the node data - // and in AvatarMixer pass it accordingly - // - // 2) we also want to know the "distance" to the viewer to adjust the relative tolerance for changes and - // whether or not we actually want to do this distance adjust + // Potential savings: + // 63 rotations * 6 bytes = 136kbps + // 3 translations * 6 bytes = 6.48kbps // auto parentID = getParentID(); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index bd0591973a..ef23d515c7 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -263,6 +263,12 @@ const float ROTATION_45D_DOT = 0.9238795f; const float ROTATION_90D_DOT = 0.7071068f; const float ROTATION_179D_DOT = 0.0087266f; +const float AVATAR_DISTANCE_LEVEL_1 = 10.0f; +const float AVATAR_DISTANCE_LEVEL_2 = 100.0f; +const float AVATAR_DISTANCE_LEVEL_3 = 1000.0f; +const float AVATAR_DISTANCE_LEVEL_4 = 10000.0f; + + // Where one's own Avatar begins in the world (will be overwritten if avatar data file is found). // This is the start location in the Sandbox (xyz: 6270, 211, 6000). const glm::vec3 START_LOCATION(6270, 211, 6000); From 81b5a3c585aba9d2774ae51f2771b7f011196e04 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 15:12:54 -0800 Subject: [PATCH 35/43] more cleanup --- assignment-client/src/Agent.cpp | 11 ++--------- assignment-client/src/avatars/ScriptableAvatar.cpp | 2 +- assignment-client/src/avatars/ScriptableAvatar.h | 2 +- interface/src/avatar/MyAvatar.h | 2 +- libraries/avatars/src/AvatarData.cpp | 8 ++------ libraries/avatars/src/AvatarData.h | 2 +- 6 files changed, 8 insertions(+), 19 deletions(-) diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index ad29cee75c..39e22ac3b0 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -499,15 +499,8 @@ void Agent::processAgentAvatar() { if (!_scriptEngine->isFinished() && _isAvatar) { auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>(); - //AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData; - AvatarData::AvatarDataDetail dataDetail = AvatarData::SendAllData; - quint64 lastSentTime = 0; - QVector<JointData>& lastSentJointData = scriptedAvatar->getLastSentJointData(); - bool distanceAdjust = false; - glm::vec3 viewerPosition(0); - - QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); - + AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? 
AvatarData::SendAllData : AvatarData::CullSmallData; + QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, 0, scriptedAvatar->getLastSentJointData()); scriptedAvatar->doneEncoding(true); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index 199f5b31d1..95bcbb587e 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -15,7 +15,7 @@ #include "ScriptableAvatar.h" QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { + bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) { _globalPosition = getPosition(); return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); } diff --git a/assignment-client/src/avatars/ScriptableAvatar.h b/assignment-client/src/avatars/ScriptableAvatar.h index 59f47a9ebb..be7a90adf9 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.h +++ b/assignment-client/src/avatars/ScriptableAvatar.h @@ -29,7 +29,7 @@ public: virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override; virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr) override; + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr) override; private slots: diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index bb591afd51..18774c8719 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -336,7 +336,7 @@ private: virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr) override; + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr) override; void simulate(float deltaTime); void updateFromTrackers(float deltaTime); diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 9a7090049b..920a8cdb98 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -1542,17 +1542,13 @@ void AvatarData::sendAvatarDataPacket() { bool cullSmallData = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO); auto dataDetail = cullSmallData ? SendAllData : CullSmallData; - quint64 lastSentTime = 0; QVector<JointData> lastSentJointData; { QReadLocker readLock(&_jointDataLock); lastSentJointData = _lastSentJointData; } - bool distanceAdjust = false; - glm::vec3 viewerPosition(0); - QByteArray avatarByteArray = toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition); - - doneEncoding(cullSmallData); // FIXME - doneEncoding() takes a bool for culling small changes, that's janky! 
+ QByteArray avatarByteArray = toByteArray(dataDetail, 0, lastSentJointData); + doneEncoding(cullSmallData); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index ef23d515c7..1f92f6b08f 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -358,7 +358,7 @@ public: } AvatarDataDetail; virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData, - bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut = nullptr); + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr); virtual void doneEncoding(bool cullSmallChanges); From 6801099cd96c99d6e92e237bb48da41511b801df Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 16:03:07 -0800 Subject: [PATCH 36/43] more cleanup --- libraries/avatars/src/AvatarData.cpp | 5 ----- libraries/avatars/src/AvatarData.h | 4 +--- libraries/shared/src/GLMHelpers.cpp | 12 ------------ libraries/shared/src/GLMHelpers.h | 4 ---- 4 files changed, 1 insertion(+), 24 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 920a8cdb98..34396fb9c0 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -417,10 +417,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent if (hasJointData) { QReadLocker readLock(&_jointDataLock); - int rotationSentCount = 0; - int translationSentCount = 0; - - // joint rotation data int numJoints = _jointData.size(); *destinationBuffer++ = (uint8_t)numJoints; @@ -440,7 +436,6 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent } } float minRotationDOT = !distanceAdjust ? 
AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); - auto distance = glm::distance(_globalPosition, viewerPosition); for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 1f92f6b08f..75a9a6ee40 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -180,9 +180,7 @@ namespace AvatarDataPacket { const size_t LOOK_AT_POSITION_SIZE = 12; PACKED_BEGIN struct AudioLoudness { - uint8_t audioLoudness; // current loudness of microphone, clamped to MAX_AUDIO_LOUDNESS and - // scaled by AUDIO_LOUDNESS_SCALE typical values 0 to 255 or once - // rescaled 0.0 to 1000.0 + uint8_t audioLoudness; // current loudness of microphone compressed with packFloatGainToByte() } PACKED_END; const size_t AUDIO_LOUDNESS_SIZE = 1; diff --git a/libraries/shared/src/GLMHelpers.cpp b/libraries/shared/src/GLMHelpers.cpp index 85b2e1f57e..ec244553f8 100644 --- a/libraries/shared/src/GLMHelpers.cpp +++ b/libraries/shared/src/GLMHelpers.cpp @@ -81,18 +81,6 @@ int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, flo return sizeof(int16_t); } -// Allows sending of fixed-point numbers: radix 1 makes 15.1 number, radix 8 makes 8.8 number, etc -int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix) { - uint8_t outVal = (uint8_t)(scalar * (float)(1 << radix)); - memcpy(buffer, &outVal, sizeof(uint8_t)); - return sizeof(outVal); -} - -int unpackFloatScalarFromSignedOneByteFixed(const uint8_t* byteFixedPointer, float* destinationPointer, int radix) { - *destinationPointer = *byteFixedPointer / (float)(1 << radix); - return sizeof(uint8_t); -} - int packFloatVec3ToSignedTwoByteFixed(unsigned char* destBuffer, const glm::vec3& srcVector, int radix) { const unsigned char* startPosition = destBuffer; destBuffer += packFloatScalarToSignedTwoByteFixed(destBuffer, srcVector.x, radix); diff --git a/libraries/shared/src/GLMHelpers.h b/libraries/shared/src/GLMHelpers.h index ed84e45ad8..4aac913768 100644 --- a/libraries/shared/src/GLMHelpers.h +++ b/libraries/shared/src/GLMHelpers.h @@ -125,10 +125,6 @@ int unpackFloatFromByte(const unsigned char* buffer, float& value, float scaleBy int packFloatScalarToSignedTwoByteFixed(unsigned char* buffer, float scalar, int radix); int unpackFloatScalarFromSignedTwoByteFixed(const int16_t* byteFixedPointer, float* destinationPointer, int radix); -// Allows sending of fixed-point numbers: radix 1 makes 7.1 number, radix 4 makes 4.4 number, etc -int unpackFloatScalarFromSignedOneByteFixed(const uint8_t* byteFixedPointer, float* destinationPointer, int radix); -int packFloatScalarToSignedOneByteFixed(unsigned char* buffer, float scalar, int radix); - // A convenience for sending vec3's as fixed-point floats int packFloatVec3ToSignedTwoByteFixed(unsigned char* destBuffer, const glm::vec3& srcVector, int radix); int unpackFloatVec3FromSignedTwoByteFixed(const unsigned char* sourceBuffer, glm::vec3& destination, int radix); From 0655b47dd780b19e58abb458e11c1f6a50041dd3 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Mon, 23 Jan 2017 16:44:55 -0800 Subject: [PATCH 37/43] fix warnings --- assignment-client/src/assets/AssetServer.cpp | 2 ++ libraries/avatars/src/AvatarData.cpp | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/assignment-client/src/assets/AssetServer.cpp b/assignment-client/src/assets/AssetServer.cpp index 
2498af8010..a052f3772a 100644 --- a/assignment-client/src/assets/AssetServer.cpp +++ b/assignment-client/src/assets/AssetServer.cpp @@ -34,7 +34,9 @@ static const uint8_t MIN_CORES_FOR_MULTICORE = 4; static const uint8_t CPU_AFFINITY_COUNT_HIGH = 2; static const uint8_t CPU_AFFINITY_COUNT_LOW = 1; +#ifdef Q_OS_WIN static const int INTERFACE_RUNNING_CHECK_FREQUENCY_MS = 1000; +#endif const QString ASSET_SERVER_LOGGING_TARGET_NAME = "asset-server"; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 34396fb9c0..71eb3d3829 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -52,7 +52,6 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; -static const int AUDIO_LOUDNESS_RADIX = 2; static const float AUDIO_LOUDNESS_SCALE = 1024.0f; //static const int MODEL_OFFSET_RADIX = 6; From 8b662647bd52d56a0a6a4a50172e9521f029fe8e Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 24 Jan 2017 12:01:57 -0800 Subject: [PATCH 38/43] CR feedback --- libraries/avatars/src/AvatarData.cpp | 1 - libraries/avatars/src/AvatarData.h | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 71eb3d3829..92b7f378a3 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -53,7 +53,6 @@ const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; static const float AUDIO_LOUDNESS_SCALE = 1024.0f; -//static const int MODEL_OFFSET_RADIX = 6; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 75a9a6ee40..84c82c5acd 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -114,7 +114,7 @@ const char IS_FINGER_POINTING_FLAG = 4; // before the "header" structure const char AVATARDATA_FLAGS_MINIMUM = 0; -using smallFloat = uint16_t; // a compressed float with less precision, user defined radix +using SmallFloat = uint16_t; // a compressed float with less precision, user defined radix namespace AvatarDataPacket { @@ -164,7 +164,7 @@ namespace AvatarDataPacket { const size_t AVATAR_ORIENTATION_SIZE = 6; PACKED_BEGIN struct AvatarScale { - smallFloat scale; // avatar's scale, compressed by packFloatRatioToTwoByte() + SmallFloat scale; // avatar's scale, compressed by packFloatRatioToTwoByte() } PACKED_END; const size_t AVATAR_SCALE_SIZE = 2; @@ -190,7 +190,7 @@ namespace AvatarDataPacket { // // POTENTIAL SAVINGS - 20 bytes - uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix + SixByteQuat sensorToWorldQuat; // 6 byte compressed quaternion part of sensor to world matrix uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix float sensorToWorldTrans[3]; // fourth column of sensor to world matrix // FIXME - sensorToWorldTrans might be able to be better compressed if it was From cb812ca5060c4d9c6012f4b847343c2b29459963 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 24 Jan 2017 12:06:31 -0800 Subject: [PATCH 39/43] CR feedback --- libraries/shared/src/SpatiallyNestable.cpp | 30 +++++++++++----------- 1 file changed, 15 
insertions(+), 15 deletions(-) diff --git a/libraries/shared/src/SpatiallyNestable.cpp b/libraries/shared/src/SpatiallyNestable.cpp index cb0284b5ba..7ecb0f7409 100644 --- a/libraries/shared/src/SpatiallyNestable.cpp +++ b/libraries/shared/src/SpatiallyNestable.cpp @@ -402,11 +402,11 @@ void SpatiallyNestable::setPosition(const glm::vec3& position, bool& success, bo changed = true; myWorldTransform.setTranslation(position); Transform::inverseMult(_transform, parentTransform, myWorldTransform); + _translationChanged = usecTimestampNow(); } }); if (success && changed) { locationChanged(tellPhysics); - _translationChanged = usecTimestampNow(); } } @@ -455,11 +455,11 @@ void SpatiallyNestable::setOrientation(const glm::quat& orientation, bool& succe changed = true; myWorldTransform.setRotation(orientation); Transform::inverseMult(_transform, parentTransform, myWorldTransform); + _rotationChanged = usecTimestampNow(); } }); if (success && changed) { locationChanged(tellPhysics); - _rotationChanged = usecTimestampNow(); } } @@ -654,12 +654,12 @@ void SpatiallyNestable::setTransform(const Transform& transform, bool& success) Transform::inverseMult(_transform, parentTransform, transform); if (_transform != beforeTransform) { changed = true; + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); if (success && changed) { locationChanged(); - _translationChanged = usecTimestampNow(); - _rotationChanged = usecTimestampNow(); } } @@ -696,11 +696,11 @@ void SpatiallyNestable::setScale(const glm::vec3& scale) { if (_transform.getScale() != scale) { _transform.setScale(scale); changed = true; + _scaleChanged = usecTimestampNow(); } }); if (changed) { dimensionsChanged(); - _scaleChanged = usecTimestampNow(); } } @@ -718,12 +718,12 @@ void SpatiallyNestable::setScale(float value) { _transform.setScale(value); if (_transform.getScale() != beforeScale) { changed = true; + _scaleChanged = usecTimestampNow(); } }); if (changed) { dimensionsChanged(); - _scaleChanged = usecTimestampNow(); } } @@ -747,14 +747,14 @@ void SpatiallyNestable::setLocalTransform(const Transform& transform) { if (_transform != transform) { _transform = transform; changed = true; + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); if (changed) { locationChanged(); - _scaleChanged = usecTimestampNow(); - _translationChanged = usecTimestampNow(); - _rotationChanged = usecTimestampNow(); } } @@ -777,11 +777,11 @@ void SpatiallyNestable::setLocalPosition(const glm::vec3& position, bool tellPhy if (_transform.getTranslation() != position) { _transform.setTranslation(position); changed = true; + _translationChanged = usecTimestampNow(); } }); if (changed) { locationChanged(tellPhysics); - _translationChanged = usecTimestampNow(); } } @@ -804,11 +804,11 @@ void SpatiallyNestable::setLocalOrientation(const glm::quat& orientation) { if (_transform.getRotation() != orientation) { _transform.setRotation(orientation); changed = true; + _rotationChanged = usecTimestampNow(); } }); if (changed) { locationChanged(); - _rotationChanged = usecTimestampNow(); } } @@ -862,11 +862,11 @@ void SpatiallyNestable::setLocalScale(const glm::vec3& scale) { if (_transform.getScale() != scale) { _transform.setScale(scale); changed = true; + _scaleChanged = usecTimestampNow(); } }); if (changed) { dimensionsChanged(); - _scaleChanged = usecTimestampNow(); } } @@ -1076,6 +1076,9 @@ void SpatiallyNestable::setLocalTransformAndVelocities( if (_transform != 
localTransform) { _transform = localTransform; changed = true; + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); // linear velocity @@ -1089,9 +1092,6 @@ void SpatiallyNestable::setLocalTransformAndVelocities( if (changed) { locationChanged(false); - _scaleChanged = usecTimestampNow(); - _translationChanged = usecTimestampNow(); - _rotationChanged = usecTimestampNow(); } } From 44b8a57ed3813c159db0a146da8759f71b78f42b Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 24 Jan 2017 12:18:33 -0800 Subject: [PATCH 40/43] more CR feedback --- libraries/avatars/src/AvatarData.cpp | 8 ++++---- libraries/avatars/src/AvatarData.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 92b7f378a3..17f85372eb 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -193,15 +193,15 @@ bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { auto distance = glm::distance(_globalPosition, viewerPosition); - float result = ROTATION_179D_DOT; // assume worst + float result = ROTATION_CHANGE_179D; // assume worst if (distance < AVATAR_DISTANCE_LEVEL_1) { result = AVATAR_MIN_ROTATION_DOT; } else if (distance < AVATAR_DISTANCE_LEVEL_2) { - result = ROTATION_15D_DOT; + result = ROTATION_CHANGE_15D; } else if (distance < AVATAR_DISTANCE_LEVEL_3) { - result = ROTATION_45D_DOT; + result = ROTATION_CHANGE_45D; } else if (distance < AVATAR_DISTANCE_LEVEL_4) { - result = ROTATION_90D_DOT; + result = ROTATION_CHANGE_90D; } return result; } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 84c82c5acd..9c19deae1b 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -256,10 +256,10 @@ const float AVATAR_SEND_FULL_UPDATE_RATIO = 0.02f; const float AVATAR_MIN_ROTATION_DOT = 0.9999999f; const float AVATAR_MIN_TRANSLATION = 0.0001f; -const float ROTATION_15D_DOT = 0.9914449f; -const float ROTATION_45D_DOT = 0.9238795f; -const float ROTATION_90D_DOT = 0.7071068f; -const float ROTATION_179D_DOT = 0.0087266f; +const float ROTATION_CHANGE_15D = 0.9914449f; +const float ROTATION_CHANGE_45D = 0.9238795f; +const float ROTATION_CHANGE_90D = 0.7071068f; +const float ROTATION_CHANGE_179D = 0.0087266f; const float AVATAR_DISTANCE_LEVEL_1 = 10.0f; const float AVATAR_DISTANCE_LEVEL_2 = 100.0f; From 1309af6d7a7d7f03601d8f34c69a762058dfab97 Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 24 Jan 2017 12:47:47 -0800 Subject: [PATCH 41/43] CR feedback --- assignment-client/src/avatars/AvatarMixerClientData.h | 4 +++- libraries/avatars/src/AvatarData.cpp | 5 ++--- libraries/avatars/src/AvatarData.h | 3 +++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index 7984848831..cb3ebdf99f 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -114,7 +114,9 @@ public: } QVector<JointData>& getLastOtherAvatarSentJoints(QUuid otherAvatar) { - return _lastOtherAvatarSentJoints[otherAvatar]; + auto result = _lastOtherAvatarSentJoints[otherAvatar]; + result.resize(_avatar->getJointCount()); + return result; } diff --git 
a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 17f85372eb..eac21a9fe2 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -429,9 +429,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent #endif if (sentJointDataOut) { - if (sentJointDataOut->size() != _jointData.size()) { - sentJointDataOut->resize(_jointData.size()); - } + sentJointDataOut->resize(_jointData.size()); // Make sure the destination is resized before using it } float minRotationDOT = !distanceAdjust ? AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); @@ -1538,6 +1536,7 @@ void AvatarData::sendAvatarDataPacket() { QVector<JointData> lastSentJointData; { QReadLocker readLock(&_jointDataLock); + _lastSentJointData.resize(_jointData.size()); lastSentJointData = _lastSentJointData; } QByteArray avatarByteArray = toByteArray(dataDetail, 0, lastSentJointData); diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index 9c19deae1b..c56660ca06 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -536,8 +536,11 @@ public: float getDataRate(const QString& rateName = QString("")); + int getJointCount() { return _jointData.size(); } + QVector<JointData> getLastSentJointData() { QReadLocker readLock(&_jointDataLock); + _lastSentJointData.resize(_jointData.size()); return _lastSentJointData; } From 8c2c05719a4f4412b947b30a4414cbf10da1d4fd Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Tue, 24 Jan 2017 14:21:47 -0800 Subject: [PATCH 42/43] fix warning --- assignment-client/src/avatars/AvatarMixerClientData.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index cb3ebdf99f..aa011f8baf 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -114,9 +114,8 @@ public: } QVector<JointData>& getLastOtherAvatarSentJoints(QUuid otherAvatar) { - auto result = _lastOtherAvatarSentJoints[otherAvatar]; - result.resize(_avatar->getJointCount()); - return result; + _lastOtherAvatarSentJoints[otherAvatar].resize(_avatar->getJointCount()); + return _lastOtherAvatarSentJoints[otherAvatar]; } From af426063ac9ffe9ab29b1e49306ceef77b80187b Mon Sep 17 00:00:00 2001 From: ZappoMan <brad@highfidelity.io> Date: Thu, 26 Jan 2017 14:37:28 -0800 Subject: [PATCH 43/43] fix bad merge --- libraries/avatars/src/AvatarData.cpp | 2 +- libraries/avatars/src/AvatarData.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index b726af3c3b..34c6b2072f 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -938,7 +938,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { JointData& data = _jointData[i]; if (validRotations[i]) { sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation); - _hasNewJointRotations = true; + _hasNewJointData = true; data.rotationSet = true; } } diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index fc4cc78447..5604e41f63 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -82,9 +82,6 @@ const quint32 AVATAR_MOTION_DEFAULTS = const quint32 AVATAR_MOTION_SCRIPTABLE_BITS = 
AVATAR_MOTION_SCRIPTED_MOTOR_ENABLED; -const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND; - - // Bitset of state flags - we store the key state, hand state, Faceshift, eye tracking, and existence of // referential data in this bit set. The hand state is an octal, but is split into two sections to maintain // backward compatibility. The bits are ordered as such (0-7 left to right).
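As a point of reference, the "octal hand state split into two sections" described above can be pictured with a small sketch like the one below. The bit positions used here are assumptions chosen only for illustration; the authoritative layout is given by the bit constants defined in AvatarData.h itself.

#include <cstdint>

// Hypothetical bit layout for the example (NOT the authoritative one):
//   bits 0-1: key state, bits 2-3: low two bits of the octal hand state,
//   bit 7:    the split-off third hand-state bit ("finger pointing").
static const int EXAMPLE_HAND_STATE_START_BIT = 2;
static const int EXAMPLE_HAND_STATE_FINGER_POINTING_BIT = 7;

static uint8_t exampleSetHandState(uint8_t flags, int handState) {            // handState is 0..7
    flags |= (uint8_t)((handState & 0x3) << EXAMPLE_HAND_STATE_START_BIT);    // low two bits
    if (handState & 0x4) {
        flags |= (uint8_t)(1 << EXAMPLE_HAND_STATE_FINGER_POINTING_BIT);      // split-off high bit
    }
    return flags;
}

static int exampleGetHandState(uint8_t flags) {
    int handState = (flags >> EXAMPLE_HAND_STATE_START_BIT) & 0x3;
    if (flags & (1 << EXAMPLE_HAND_STATE_FINGER_POINTING_BIT)) {
        handState |= 0x4;
    }
    return handState;
}

Splitting the third bit out this way lets newer clients carry the extra hand-state bit without moving the bit positions that older parsers already read, which is the backward-compatibility point the comment above is making.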