diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 1a034e5c8a..669c2b177f 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -502,8 +502,8 @@ void Agent::processAgentAvatar() { if (!_scriptEngine->isFinished() && _isAvatar) { auto scriptedAvatar = DependencyManager::get(); - QByteArray avatarByteArray = scriptedAvatar->toByteArray((randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) - ? AvatarData::SendAllData : AvatarData::CullSmallData); + AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData; + QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, 0, scriptedAvatar->getLastSentJointData()); scriptedAvatar->doneEncoding(true); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/assignment-client/src/assets/AssetServer.cpp b/assignment-client/src/assets/AssetServer.cpp index 6674f5eb7a..82dd23a9de 100644 --- a/assignment-client/src/assets/AssetServer.cpp +++ b/assignment-client/src/assets/AssetServer.cpp @@ -35,7 +35,9 @@ static const uint8_t MIN_CORES_FOR_MULTICORE = 4; static const uint8_t CPU_AFFINITY_COUNT_HIGH = 2; static const uint8_t CPU_AFFINITY_COUNT_LOW = 1; +#ifdef Q_OS_WIN static const int INTERFACE_RUNNING_CHECK_FREQUENCY_MS = 1000; +#endif const QString ASSET_SERVER_LOGGING_TARGET_NAME = "asset-server"; diff --git a/assignment-client/src/avatars/AvatarMixer.cpp b/assignment-client/src/avatars/AvatarMixer.cpp index 8c25effe45..457787013f 100644 --- a/assignment-client/src/avatars/AvatarMixer.cpp +++ b/assignment-client/src/avatars/AvatarMixer.cpp @@ -423,12 +423,17 @@ void AvatarMixer::broadcastAvatarData() { nodeData->incrementAvatarOutOfView(); } else { detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO - ? AvatarData::SendAllData : AvatarData::IncludeSmallData; + ? 
AvatarData::SendAllData : AvatarData::CullSmallData; nodeData->incrementAvatarInView(); } numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122()); - numAvatarDataBytes += avatarPacketList->write(otherAvatar.toByteArray(detail)); + auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID()); + QVector& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID()); + bool distanceAdjust = true; + glm::vec3 viewerPosition = nodeData->getPosition(); + auto bytes = otherAvatar.toByteArray(detail, lastEncodeForOther, lastSentJointsForOther, distanceAdjust, viewerPosition, &lastSentJointsForOther); + numAvatarDataBytes += avatarPacketList->write(bytes); avatarPacketList->endSegment(); }); diff --git a/assignment-client/src/avatars/AvatarMixerClientData.h b/assignment-client/src/avatars/AvatarMixerClientData.h index 38db2e74d2..aa011f8baf 100644 --- a/assignment-client/src/avatars/AvatarMixerClientData.h +++ b/assignment-client/src/avatars/AvatarMixerClientData.h @@ -104,6 +104,22 @@ public: bool getRequestsDomainListData() { return _requestsDomainListData; } void setRequestsDomainListData(bool requesting) { _requestsDomainListData = requesting; } + quint64 getLastOtherAvatarEncodeTime(QUuid otherAvatar) { + quint64 result = 0; + if (_lastOtherAvatarEncodeTime.find(otherAvatar) != _lastOtherAvatarEncodeTime.end()) { + result = _lastOtherAvatarEncodeTime[otherAvatar]; + } + _lastOtherAvatarEncodeTime[otherAvatar] = usecTimestampNow(); + return result; + } + + QVector& getLastOtherAvatarSentJoints(QUuid otherAvatar) { + _lastOtherAvatarSentJoints[otherAvatar].resize(_avatar->getJointCount()); + return _lastOtherAvatarSentJoints[otherAvatar]; + } + + + private: AvatarSharedPointer _avatar { new AvatarData() }; @@ -111,6 +127,11 @@ private: std::unordered_map _lastBroadcastSequenceNumbers; std::unordered_set _hasReceivedFirstPacketsFrom; + // this is a map of the last time we encoded an "other" avatar 
for + // sending to "this" node + std::unordered_map _lastOtherAvatarEncodeTime; + std::unordered_map> _lastOtherAvatarSentJoints; + HRCTime _identityChangeTimestamp; bool _avatarSessionDisplayNameMustChange{ false }; diff --git a/assignment-client/src/avatars/ScriptableAvatar.cpp b/assignment-client/src/avatars/ScriptableAvatar.cpp index b4c9a8e89d..95bcbb587e 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.cpp +++ b/assignment-client/src/avatars/ScriptableAvatar.cpp @@ -14,6 +14,13 @@ #include #include "ScriptableAvatar.h" +QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector* sentJointDataOut) { + _globalPosition = getPosition(); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); +} + + // hold and priority unused but kept so that client side JS can run. void ScriptableAvatar::startAnimation(const QString& url, float fps, float priority, bool loop, bool hold, float firstFrame, float lastFrame, const QStringList& maskedJoints) { diff --git a/assignment-client/src/avatars/ScriptableAvatar.h b/assignment-client/src/avatars/ScriptableAvatar.h index 18d64f4ac5..be7a90adf9 100644 --- a/assignment-client/src/avatars/ScriptableAvatar.h +++ b/assignment-client/src/avatars/ScriptableAvatar.h @@ -27,6 +27,10 @@ public: Q_INVOKABLE void stopAnimation(); Q_INVOKABLE AnimationDetails getAnimationDetails(); virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override; + + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector* sentJointDataOut = nullptr) override; + private slots: void update(float deltatime); diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index fef812ce71..576241ddc2 
100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -5173,6 +5173,7 @@ void Application::nodeAdded(SharedNodePointer node) const { if (node->getType() == NodeType::AvatarMixer) { // new avatar mixer, send off our identity packet right away getMyAvatar()->sendIdentityPacket(); + getMyAvatar()->resetLastSent(); } } diff --git a/interface/src/avatar/AvatarManager.cpp b/interface/src/avatar/AvatarManager.cpp index 2d0860b355..df3164e6fc 100644 --- a/interface/src/avatar/AvatarManager.cpp +++ b/interface/src/avatar/AvatarManager.cpp @@ -132,6 +132,11 @@ void AvatarManager::updateMyAvatar(float deltaTime) { Q_LOGGING_CATEGORY(trace_simulation_avatar, "trace.simulation.avatar"); +float AvatarManager::getAvatarDataRate(const QUuid& sessionID, const QString& rateName) { + auto avatar = getAvatarBySessionID(sessionID); + return avatar->getDataRate(rateName); +} + class AvatarPriority { public: AvatarPriority(AvatarSharedPointer a, float p) : avatar(a), priority(p) {} diff --git a/interface/src/avatar/AvatarManager.h b/interface/src/avatar/AvatarManager.h index 817a3e580f..787d6f2d83 100644 --- a/interface/src/avatar/AvatarManager.h +++ b/interface/src/avatar/AvatarManager.h @@ -69,6 +69,7 @@ public: void handleOutgoingChanges(const VectorOfMotionStates& motionStates); void handleCollisionEvents(const CollisionEvents& collisionEvents); + Q_INVOKABLE float getAvatarDataRate(const QUuid& sessionID, const QString& rateName = QString("")); Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersection(const PickRay& ray, const QScriptValue& avatarIdsToInclude = QScriptValue(), const QScriptValue& avatarIdsToDiscard = QScriptValue()); diff --git a/interface/src/avatar/MyAvatar.cpp b/interface/src/avatar/MyAvatar.cpp index dd95c5963d..fd2f113f2a 100644 --- a/interface/src/avatar/MyAvatar.cpp +++ b/interface/src/avatar/MyAvatar.cpp @@ -226,23 +226,24 @@ void MyAvatar::simulateAttachments(float deltaTime) { // don't update attachments here, do it in 
harvestResultsFromPhysicsSimulation() } -QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail) { +QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector* sentJointDataOut) { CameraMode mode = qApp->getCamera()->getMode(); _globalPosition = getPosition(); - _globalBoundingBoxCorner.x = _characterController.getCapsuleRadius(); - _globalBoundingBoxCorner.y = _characterController.getCapsuleHalfHeight(); - _globalBoundingBoxCorner.z = _characterController.getCapsuleRadius(); - _globalBoundingBoxCorner += _characterController.getCapsuleLocalOffset(); + _globalBoundingBoxDimensions.x = _characterController.getCapsuleRadius(); + _globalBoundingBoxDimensions.y = _characterController.getCapsuleHalfHeight(); + _globalBoundingBoxDimensions.z = _characterController.getCapsuleRadius(); + _globalBoundingBoxOffset = _characterController.getCapsuleLocalOffset(); if (mode == CAMERA_MODE_THIRD_PERSON || mode == CAMERA_MODE_INDEPENDENT) { // fake the avatar position that is sent up to the AvatarMixer glm::vec3 oldPosition = getPosition(); setPosition(getSkeletonPosition()); - QByteArray array = AvatarData::toByteArray(dataDetail); + QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); // copy the correct position back setPosition(oldPosition); return array; } - return AvatarData::toByteArray(dataDetail); + return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut); } void MyAvatar::centerBody() { diff --git a/interface/src/avatar/MyAvatar.h b/interface/src/avatar/MyAvatar.h index 0e5ce0fe7b..18774c8719 100644 --- a/interface/src/avatar/MyAvatar.h +++ b/interface/src/avatar/MyAvatar.h @@ -333,7 +333,11 @@ private: glm::vec3 getWorldBodyPosition() const; glm::quat getWorldBodyOrientation() const; - QByteArray 
toByteArray(AvatarDataDetail dataDetail) override; + + + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector* sentJointDataOut = nullptr) override; + void simulate(float deltaTime); void updateFromTrackers(float deltaTime); virtual void render(RenderArgs* renderArgs, const glm::vec3& cameraPositio) override; diff --git a/libraries/avatars/src/AvatarData.cpp b/libraries/avatars/src/AvatarData.cpp index 701fae1b2b..34c6b2072f 100644 --- a/libraries/avatars/src/AvatarData.cpp +++ b/libraries/avatars/src/AvatarData.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include "AvatarLogging.h" @@ -49,67 +50,9 @@ const glm::vec3 DEFAULT_LOCAL_AABOX_SCALE(1.0f); const QString AvatarData::FRAME_NAME = "com.highfidelity.recording.AvatarData"; -namespace AvatarDataPacket { - - // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure. - - PACKED_BEGIN struct Header { - uint8_t packetStateFlags; // state flags, currently used to indicate if the packet is a minimal or fuller packet - } PACKED_END; - const size_t HEADER_SIZE = 1; - - PACKED_BEGIN struct MinimalAvatarInfo { - float globalPosition[3]; // avatar's position - } PACKED_END; - const size_t MINIMAL_AVATAR_INFO_SIZE = 12; - - PACKED_BEGIN struct AvatarInfo { - float position[3]; // skeletal model's position - float globalPosition[3]; // avatar's position - float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box - uint16_t localOrientation[3]; // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to - uint16_t scale; // (compressed) 'ratio' encoding uses sign bit as flag. - float lookAtPosition[3]; // world space position that eyes are focusing on. 
- float audioLoudness; // current loundess of microphone - uint8_t sensorToWorldQuat[6]; // 6 byte compressed quaternion part of sensor to world matrix - uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix - float sensorToWorldTrans[3]; // fourth column of sensor to world matrix - uint8_t flags; - } PACKED_END; - const size_t AVATAR_INFO_SIZE = 81; - - // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags - PACKED_BEGIN struct ParentInfo { - uint8_t parentUUID[16]; // rfc 4122 encoded - uint16_t parentJointIndex; - } PACKED_END; - const size_t PARENT_INFO_SIZE = 18; - - // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags - PACKED_BEGIN struct FaceTrackerInfo { - float leftEyeBlink; - float rightEyeBlink; - float averageLoudness; - float browAudioLift; - uint8_t numBlendshapeCoefficients; - // float blendshapeCoefficients[numBlendshapeCoefficients]; - } PACKED_END; - const size_t FACE_TRACKER_INFO_SIZE = 17; - - // variable length structure follows - /* - struct JointData { - uint8_t numJoints; - uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. - SixByteQuat rotation[numValidRotations]; // encodeded and compressed by packOrientationQuatToSixBytes() - uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. 
- SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() - }; - */ -} - static const int TRANSLATION_COMPRESSION_RADIX = 12; static const int SENSOR_TO_WORLD_SCALE_RADIX = 10; +static const float AUDIO_LOUDNESS_SCALE = 1024.0f; #define ASSERT(COND) do { if (!(COND)) { abort(); } } while(0) @@ -134,8 +77,15 @@ AvatarData::AvatarData() : setBodyRoll(0.0f); ASSERT(sizeof(AvatarDataPacket::Header) == AvatarDataPacket::HEADER_SIZE); - ASSERT(sizeof(AvatarDataPacket::MinimalAvatarInfo) == AvatarDataPacket::MINIMAL_AVATAR_INFO_SIZE); - ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarGlobalPosition) == AvatarDataPacket::AVATAR_GLOBAL_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarLocalPosition) == AvatarDataPacket::AVATAR_LOCAL_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarBoundingBox) == AvatarDataPacket::AVATAR_BOUNDING_BOX_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarOrientation) == AvatarDataPacket::AVATAR_ORIENTATION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AvatarScale) == AvatarDataPacket::AVATAR_SCALE_SIZE); + ASSERT(sizeof(AvatarDataPacket::LookAtPosition) == AvatarDataPacket::LOOK_AT_POSITION_SIZE); + ASSERT(sizeof(AvatarDataPacket::AudioLoudness) == AvatarDataPacket::AUDIO_LOUDNESS_SIZE); + ASSERT(sizeof(AvatarDataPacket::SensorToWorldMatrix) == AvatarDataPacket::SENSOR_TO_WORLD_SIZE); + ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE); ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE); ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE); } @@ -176,7 +126,11 @@ float AvatarData::getTargetScale() const { } void AvatarData::setTargetScale(float targetScale) { - _targetScale = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); + auto newValue = glm::clamp(targetScale, MIN_AVATAR_SCALE, 
MAX_AVATAR_SCALE); + if (_targetScale != newValue) { + _targetScale = newValue; + _scaleChanged = usecTimestampNow(); + } } glm::vec3 AvatarData::getHandPosition() const { @@ -188,15 +142,7 @@ void AvatarData::setHandPosition(const glm::vec3& handPosition) { _handPosition = glm::inverse(getOrientation()) * (handPosition - getPosition()); } - -QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { - bool cullSmallChanges = (dataDetail == CullSmallData); - bool sendAll = (dataDetail == SendAllData); - bool sendMinimum = (dataDetail == MinimumData); - // TODO: DRY this up to a shared method - // that can pack any type given the number of bytes - // and return the number of bytes to push the pointer - +void AvatarData::lazyInitHeadData() { // lazily allocate memory for HeadData in case we're not an Avatar instance if (!_headData) { _headData = new HeadData(this); @@ -204,126 +150,302 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { if (_forceFaceTrackerConnected) { _headData->_isFaceTrackerConnected = true; } +} + + +bool AvatarData::avatarBoundingBoxChangedSince(quint64 time) { + return _avatarBoundingBoxChanged >= time; +} + +bool AvatarData::avatarScaleChangedSince(quint64 time) { + return _avatarScaleChanged >= time; +} + +bool AvatarData::lookAtPositionChangedSince(quint64 time) { + return _headData->lookAtPositionChangedSince(time); +} + +bool AvatarData::audioLoudnessChangedSince(quint64 time) { + return _headData->audioLoudnessChangedSince(time); +} + +bool AvatarData::sensorToWorldMatrixChangedSince(quint64 time) { + return _sensorToWorldMatrixChanged >= time; +} + +bool AvatarData::additionalFlagsChangedSince(quint64 time) { + return _additionalFlagsChanged >= time; +} + +bool AvatarData::parentInfoChangedSince(quint64 time) { + return _parentChanged >= time; +} + +bool AvatarData::faceTrackerInfoChangedSince(quint64 time) { + return true; // FIXME! 
+} + +float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) { + auto distance = glm::distance(_globalPosition, viewerPosition); + float result = ROTATION_CHANGE_179D; // assume worst + if (distance < AVATAR_DISTANCE_LEVEL_1) { + result = AVATAR_MIN_ROTATION_DOT; + } else if (distance < AVATAR_DISTANCE_LEVEL_2) { + result = ROTATION_CHANGE_15D; + } else if (distance < AVATAR_DISTANCE_LEVEL_3) { + result = ROTATION_CHANGE_45D; + } else if (distance < AVATAR_DISTANCE_LEVEL_4) { + result = ROTATION_CHANGE_90D; + } + return result; +} + +float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) { + return AVATAR_MIN_TRANSLATION; // Eventually make this distance sensitive as well +} + + +QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust, glm::vec3 viewerPosition, QVector* sentJointDataOut) { + + // if no timestamp was included, then assume the avatarData is single instance + // and is tracking its own last encoding time. + if (lastSentTime == 0) { + lastSentTime = _lastToByteArray; + _lastToByteArray = usecTimestampNow(); + } + + bool cullSmallChanges = (dataDetail == CullSmallData); + bool sendAll = (dataDetail == SendAllData); + bool sendMinimum = (dataDetail == MinimumData); + + lazyInitHeadData(); QByteArray avatarDataByteArray(udt::MAX_PACKET_SIZE, 0); - unsigned char* destinationBuffer = reinterpret_cast(avatarDataByteArray.data()); unsigned char* startPosition = destinationBuffer; + // FIXME - + // + // BUG -- if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens... + // this is an iFrame issue... what to do about that? + // + // BUG -- Resizing avatar seems to "take too long"... 
the avatar doesn't redraw at smaller size right away + // + // TODO consider these additional optimizations in the future + // 1) SensorToWorld - should we only send this for avatars with attachments?? - 20 bytes - 7.20 kbps + // 2) GUIID for the session change to 2byte index (savings) - 14 bytes - 5.04 kbps + // 3) Improve Joints -- currently we use rotational tolerances, but if we had skeleton/bone length data + // we could do a better job of determining if the change in joints actually translates to visible + // changes at distance. + // + // Potential savings: + // 63 rotations * 6 bytes = 136kbps + // 3 translations * 6 bytes = 6.48kbps + // + + auto parentID = getParentID(); + + bool hasAvatarGlobalPosition = true; // always include global position + bool hasAvatarOrientation = sendAll || rotationChangedSince(lastSentTime); + bool hasAvatarBoundingBox = sendAll || avatarBoundingBoxChangedSince(lastSentTime); + bool hasAvatarScale = sendAll || avatarScaleChangedSince(lastSentTime); + bool hasLookAtPosition = sendAll || lookAtPositionChangedSince(lastSentTime); + bool hasAudioLoudness = sendAll || audioLoudnessChangedSince(lastSentTime); + bool hasSensorToWorldMatrix = sendAll || sensorToWorldMatrixChangedSince(lastSentTime); + bool hasAdditionalFlags = sendAll || additionalFlagsChangedSince(lastSentTime); + + // local position, and parent info only apply to avatars that are parented. The local position + // and the parent info can change independently though, so we track their "changed since" + // separately + bool hasParentInfo = hasParent() && (sendAll || parentInfoChangedSince(lastSentTime)); + bool hasAvatarLocalPosition = hasParent() && (sendAll || tranlationChangedSince(lastSentTime)); + + bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime)); + bool hasJointData = sendAll || !sendMinimum; + // Leading flags, to indicate how much data is actually included in the packet... 
- uint8_t packetStateFlags = 0; - if (sendMinimum) { - setAtBit(packetStateFlags, AVATARDATA_FLAGS_MINIMUM); - } + AvatarDataPacket::HasFlags packetStateFlags = + (hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0) + | (hasAvatarBoundingBox ? AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX : 0) + | (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0) + | (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0) + | (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0) + | (hasAudioLoudness ? AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS : 0) + | (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0) + | (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0) + | (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0) + | (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0) + | (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0) + | (hasJointData ? 
AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0); memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags)); destinationBuffer += sizeof(packetStateFlags); - if (sendMinimum) { - memcpy(destinationBuffer, &_globalPosition, sizeof(_globalPosition)); - destinationBuffer += sizeof(_globalPosition); - } else { - auto avatarInfo = reinterpret_cast(destinationBuffer); - avatarInfo->position[0] = getLocalPosition().x; - avatarInfo->position[1] = getLocalPosition().y; - avatarInfo->position[2] = getLocalPosition().z; - avatarInfo->globalPosition[0] = _globalPosition.x; - avatarInfo->globalPosition[1] = _globalPosition.y; - avatarInfo->globalPosition[2] = _globalPosition.z; - avatarInfo->globalBoundingBoxCorner[0] = getPosition().x - _globalBoundingBoxCorner.x; - avatarInfo->globalBoundingBoxCorner[1] = getPosition().y - _globalBoundingBoxCorner.y; - avatarInfo->globalBoundingBoxCorner[2] = getPosition().z - _globalBoundingBoxCorner.z; + if (hasAvatarGlobalPosition) { + auto data = reinterpret_cast(destinationBuffer); + data->globalPosition[0] = _globalPosition.x; + data->globalPosition[1] = _globalPosition.y; + data->globalPosition[2] = _globalPosition.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); + } - glm::vec3 bodyEulerAngles = glm::degrees(safeEulerAngles(getLocalOrientation())); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 0), bodyEulerAngles.y); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 1), bodyEulerAngles.x); - packFloatAngleToTwoByte((uint8_t*)(avatarInfo->localOrientation + 2), bodyEulerAngles.z); - packFloatRatioToTwoByte((uint8_t*)(&avatarInfo->scale), getDomainLimitedScale()); - avatarInfo->lookAtPosition[0] = _headData->_lookAtPosition.x; - avatarInfo->lookAtPosition[1] = _headData->_lookAtPosition.y; - avatarInfo->lookAtPosition[2] = _headData->_lookAtPosition.z; - avatarInfo->audioLoudness = _headData->_audioLoudness; + if (hasAvatarBoundingBox) { + auto data = 
reinterpret_cast(destinationBuffer); + data->avatarDimensions[0] = _globalBoundingBoxDimensions.x; + data->avatarDimensions[1] = _globalBoundingBoxDimensions.y; + data->avatarDimensions[2] = _globalBoundingBoxDimensions.z; + + data->boundOriginOffset[0] = _globalBoundingBoxOffset.x; + data->boundOriginOffset[1] = _globalBoundingBoxOffset.y; + data->boundOriginOffset[2] = _globalBoundingBoxOffset.z; + + destinationBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox); + } + + if (hasAvatarOrientation) { + auto localOrientation = getLocalOrientation(); + destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, localOrientation); + } + + if (hasAvatarScale) { + auto data = reinterpret_cast(destinationBuffer); + auto scale = getDomainLimitedScale(); + packFloatRatioToTwoByte((uint8_t*)(&data->scale), scale); + destinationBuffer += sizeof(AvatarDataPacket::AvatarScale); + } + + if (hasLookAtPosition) { + auto data = reinterpret_cast(destinationBuffer); + auto lookAt = _headData->getLookAtPosition(); + data->lookAtPosition[0] = lookAt.x; + data->lookAtPosition[1] = lookAt.y; + data->lookAtPosition[2] = lookAt.z; + destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition); + } + + if (hasAudioLoudness) { + auto data = reinterpret_cast(destinationBuffer); + data->audioLoudness = packFloatGainToByte(_headData->getAudioLoudness() / AUDIO_LOUDNESS_SCALE); + destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness); + } + + if (hasSensorToWorldMatrix) { + auto data = reinterpret_cast(destinationBuffer); glm::mat4 sensorToWorldMatrix = getSensorToWorldMatrix(); - packOrientationQuatToSixBytes(avatarInfo->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); + packOrientationQuatToSixBytes(data->sensorToWorldQuat, glmExtractRotation(sensorToWorldMatrix)); glm::vec3 scale = extractScale(sensorToWorldMatrix); - packFloatScalarToSignedTwoByteFixed((uint8_t*)&avatarInfo->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX); - 
avatarInfo->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0]; - avatarInfo->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; - avatarInfo->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; + packFloatScalarToSignedTwoByteFixed((uint8_t*)&data->sensorToWorldScale, scale.x, SENSOR_TO_WORLD_SCALE_RADIX); + data->sensorToWorldTrans[0] = sensorToWorldMatrix[3][0]; + data->sensorToWorldTrans[1] = sensorToWorldMatrix[3][1]; + data->sensorToWorldTrans[2] = sensorToWorldMatrix[3][2]; + destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); + } + + if (hasAdditionalFlags) { + auto data = reinterpret_cast(destinationBuffer); + + uint8_t flags { 0 }; + + setSemiNibbleAt(flags, KEY_STATE_START_BIT, _keyState); - setSemiNibbleAt(avatarInfo->flags, KEY_STATE_START_BIT, _keyState); // hand state bool isFingerPointing = _handState & IS_FINGER_POINTING_FLAG; - setSemiNibbleAt(avatarInfo->flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG); + setSemiNibbleAt(flags, HAND_STATE_START_BIT, _handState & ~IS_FINGER_POINTING_FLAG); if (isFingerPointing) { - setAtBit(avatarInfo->flags, HAND_STATE_FINGER_POINTING_BIT); + setAtBit(flags, HAND_STATE_FINGER_POINTING_BIT); } // faceshift state if (_headData->_isFaceTrackerConnected) { - setAtBit(avatarInfo->flags, IS_FACESHIFT_CONNECTED); + setAtBit(flags, IS_FACESHIFT_CONNECTED); } // eye tracker state if (_headData->_isEyeTrackerConnected) { - setAtBit(avatarInfo->flags, IS_EYE_TRACKER_CONNECTED); + setAtBit(flags, IS_EYE_TRACKER_CONNECTED); } // referential state - QUuid parentID = getParentID(); if (!parentID.isNull()) { - setAtBit(avatarInfo->flags, HAS_REFERENTIAL); + setAtBit(flags, HAS_REFERENTIAL); } - destinationBuffer += sizeof(AvatarDataPacket::AvatarInfo); + data->flags = flags; + destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags); + } - if (!parentID.isNull()) { - auto parentInfo = reinterpret_cast(destinationBuffer); - QByteArray referentialAsBytes = parentID.toRfc4122(); - 
memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); - parentInfo->parentJointIndex = _parentJointIndex; - destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); - } + if (hasAvatarLocalPosition) { + auto data = reinterpret_cast(destinationBuffer); + auto localPosition = getLocalPosition(); + data->localPosition[0] = localPosition.x; + data->localPosition[1] = localPosition.y; + data->localPosition[2] = localPosition.z; + destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + } - // If it is connected, pack up the data - if (_headData->_isFaceTrackerConnected) { - auto faceTrackerInfo = reinterpret_cast(destinationBuffer); + if (hasParentInfo) { + auto parentInfo = reinterpret_cast(destinationBuffer); + QByteArray referentialAsBytes = parentID.toRfc4122(); + memcpy(parentInfo->parentUUID, referentialAsBytes.data(), referentialAsBytes.size()); + parentInfo->parentJointIndex = _parentJointIndex; + destinationBuffer += sizeof(AvatarDataPacket::ParentInfo); + } - faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink; - faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink; - faceTrackerInfo->averageLoudness = _headData->_averageLoudness; - faceTrackerInfo->browAudioLift = _headData->_browAudioLift; - faceTrackerInfo->numBlendshapeCoefficients = _headData->_blendshapeCoefficients.size(); - destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); + // If it is connected, pack up the data + if (hasFaceTrackerInfo) { + auto faceTrackerInfo = reinterpret_cast(destinationBuffer); - // followed by a variable number of float coefficients - memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); - destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); - } + faceTrackerInfo->leftEyeBlink = _headData->_leftEyeBlink; + faceTrackerInfo->rightEyeBlink = _headData->_rightEyeBlink; + faceTrackerInfo->averageLoudness = 
_headData->_averageLoudness; + faceTrackerInfo->browAudioLift = _headData->_browAudioLift; + faceTrackerInfo->numBlendshapeCoefficients = _headData->_blendshapeCoefficients.size(); + destinationBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); + // followed by a variable number of float coefficients + memcpy(destinationBuffer, _headData->_blendshapeCoefficients.data(), _headData->_blendshapeCoefficients.size() * sizeof(float)); + destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float); + } + + // If it is connected, pack up the data + if (hasJointData) { QReadLocker readLock(&_jointDataLock); // joint rotation data - *destinationBuffer++ = _jointData.size(); + int numJoints = _jointData.size(); + *destinationBuffer++ = (uint8_t)numJoints; + unsigned char* validityPosition = destinationBuffer; unsigned char validity = 0; int validityBit = 0; - #ifdef WANT_DEBUG +#ifdef WANT_DEBUG int rotationSentCount = 0; unsigned char* beforeRotations = destinationBuffer; - #endif +#endif - _lastSentJointData.resize(_jointData.size()); + if (sentJointDataOut) { + sentJointDataOut->resize(_jointData.size()); // Make sure the destination is resized before using it + } + float minRotationDOT = !distanceAdjust ? AVATAR_MIN_ROTATION_DOT : getDistanceBasedMinRotationDOT(viewerPosition); - for (int i=0; i < _jointData.size(); i++) { + for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].rotation != data.rotation) { - if (sendAll || - !cullSmallChanges || - fabsf(glm::dot(data.rotation, _lastSentJointData[i].rotation)) <= AVATAR_MIN_ROTATION_DOT) { + + // The dot product for smaller rotations is a smaller number. 
+ // So if the dot() is less than the value, then the rotation is a larger angle of rotation + bool largeEnoughRotation = fabsf(glm::dot(data.rotation, lastSentJointData[i].rotation)) < minRotationDOT; + + if (sendAll || lastSentJointData[i].rotation != data.rotation) { + if (sendAll || !cullSmallChanges || largeEnoughRotation) { if (data.rotationSet) { validity |= (1 << validityBit); - #ifdef WANT_DEBUG +#ifdef WANT_DEBUG rotationSentCount++; - #endif +#endif + if (sentJointDataOut) { + auto jointDataOut = *sentJointDataOut; + jointDataOut[i].rotation = data.rotation; + } + } } } @@ -338,7 +460,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { validityBit = 0; validity = *validityPosition++; - for (int i = 0; i < _jointData.size(); i ++) { + for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; if (validity & (1 << validityBit)) { destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, data.rotation); @@ -355,18 +477,20 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { validity = 0; validityBit = 0; - #ifdef WANT_DEBUG +#ifdef WANT_DEBUG int translationSentCount = 0; unsigned char* beforeTranslations = destinationBuffer; - #endif +#endif + + float minTranslation = !distanceAdjust ? 
AVATAR_MIN_TRANSLATION : getDistanceBasedMinTranslationDistance(viewerPosition); float maxTranslationDimension = 0.0; - for (int i=0; i < _jointData.size(); i++) { + for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; - if (sendAll || _lastSentJointData[i].translation != data.translation) { + if (sendAll || lastSentJointData[i].translation != data.translation) { if (sendAll || !cullSmallChanges || - glm::distance(data.translation, _lastSentJointData[i].translation) > AVATAR_MIN_TRANSLATION) { + glm::distance(data.translation, lastSentJointData[i].translation) > minTranslation) { if (data.translationSet) { validity |= (1 << validityBit); #ifdef WANT_DEBUG @@ -375,6 +499,12 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension); maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension); + + if (sentJointDataOut) { + auto jointDataOut = *sentJointDataOut; + jointDataOut[i].translation = data.translation; + } + } } } @@ -390,7 +520,7 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { validityBit = 0; validity = *validityPosition++; - for (int i = 0; i < _jointData.size(); i ++) { + for (int i = 0; i < _jointData.size(); i++) { const JointData& data = _jointData[i]; if (validity & (1 << validityBit)) { destinationBuffer += @@ -406,29 +536,31 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail) { Transform controllerLeftHandTransform = Transform(getControllerLeftHandMatrix()); destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerLeftHandTransform.getRotation()); destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerLeftHandTransform.getTranslation(), - TRANSLATION_COMPRESSION_RADIX); + TRANSLATION_COMPRESSION_RADIX); Transform 
controllerRightHandTransform = Transform(getControllerRightHandMatrix()); destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, controllerRightHandTransform.getRotation()); destinationBuffer += packFloatVec3ToSignedTwoByteFixed(destinationBuffer, controllerRightHandTransform.getTranslation(), - TRANSLATION_COMPRESSION_RADIX); + TRANSLATION_COMPRESSION_RADIX); - #ifdef WANT_DEBUG +#ifdef WANT_DEBUG if (sendAll) { qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll - << "rotations:" << rotationSentCount << "translations:" << translationSentCount - << "largest:" << maxTranslationDimension - << "size:" - << (beforeRotations - startPosition) << "+" - << (beforeTranslations - beforeRotations) << "+" - << (destinationBuffer - beforeTranslations) << "=" - << (destinationBuffer - startPosition); + << "rotations:" << rotationSentCount << "translations:" << translationSentCount + << "largest:" << maxTranslationDimension + << "size:" + << (beforeRotations - startPosition) << "+" + << (beforeTranslations - beforeRotations) << "+" + << (destinationBuffer - beforeTranslations) << "=" + << (destinationBuffer - startPosition); } - #endif +#endif } - return avatarDataByteArray.left(destinationBuffer - startPosition); + int avatarDataSize = destinationBuffer - startPosition; + return avatarDataByteArray.left(avatarDataSize); } +// NOTE: This is never used in a "distanceAdjust" mode, so it's ok that it doesn't use a variable minimum rotation/translation void AvatarData::doneEncoding(bool cullSmallChanges) { // The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData. 
QReadLocker readLock(&_jointDataLock); @@ -495,104 +627,180 @@ const unsigned char* unpackFauxJoint(const unsigned char* sourceBuffer, ThreadSa // read data in packet starting at byte offset and return number of bytes parsed int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { // lazily allocate memory for HeadData in case we're not an Avatar instance - if (!_headData) { - _headData = new HeadData(this); - } + lazyInitHeadData(); - uint8_t packetStateFlags = buffer.at(0); - bool minimumSent = oneAtBit(packetStateFlags, AVATARDATA_FLAGS_MINIMUM); + AvatarDataPacket::HasFlags packetStateFlags; const unsigned char* startPosition = reinterpret_cast(buffer.data()); const unsigned char* endPosition = startPosition + buffer.size(); - const unsigned char* sourceBuffer = startPosition + sizeof(packetStateFlags); // skip the flags!! + const unsigned char* sourceBuffer = startPosition; + + // read the packet flags + memcpy(&packetStateFlags, sourceBuffer, sizeof(packetStateFlags)); + sourceBuffer += sizeof(packetStateFlags); + + #define HAS_FLAG(B,F) ((B & F) == F) + + bool hasAvatarGlobalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION); + bool hasAvatarBoundingBox = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX); + bool hasAvatarOrientation = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION); + bool hasAvatarScale = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_SCALE); + bool hasLookAtPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION); + bool hasAudioLoudness = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS); + bool hasSensorToWorldMatrix = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX); + bool hasAdditionalFlags = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS); + bool hasParentInfo = HAS_FLAG(packetStateFlags, 
AvatarDataPacket::PACKET_HAS_PARENT_INFO); + bool hasAvatarLocalPosition = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION); + bool hasFaceTrackerInfo = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO); + bool hasJointData = HAS_FLAG(packetStateFlags, AvatarDataPacket::PACKET_HAS_JOINT_DATA); - // if this is the minimum, then it only includes the flags - if (minimumSent) { - memcpy(&_globalPosition, sourceBuffer, sizeof(_globalPosition)); - sourceBuffer += sizeof(_globalPosition); - int numBytesRead = (sourceBuffer - startPosition); - _averageBytesReceived.updateAverage(numBytesRead); - return numBytesRead; - } quint64 now = usecTimestampNow(); - PACKET_READ_CHECK(AvatarInfo, sizeof(AvatarDataPacket::AvatarInfo)); - auto avatarInfo = reinterpret_cast(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::AvatarInfo); + if (hasAvatarGlobalPosition) { + auto startSection = sourceBuffer; - glm::vec3 position = glm::vec3(avatarInfo->position[0], avatarInfo->position[1], avatarInfo->position[2]); - _globalPosition = glm::vec3(avatarInfo->globalPosition[0], avatarInfo->globalPosition[1], avatarInfo->globalPosition[2]); - _globalBoundingBoxCorner = glm::vec3(avatarInfo->globalBoundingBoxCorner[0], avatarInfo->globalBoundingBoxCorner[1], avatarInfo->globalBoundingBoxCorner[2]); - if (isNaN(position)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID(); + PACKET_READ_CHECK(AvatarGlobalPosition, sizeof(AvatarDataPacket::AvatarGlobalPosition)); + auto data = reinterpret_cast(sourceBuffer); + auto newValue = glm::vec3(data->globalPosition[0], data->globalPosition[1], data->globalPosition[2]); + if (_globalPosition != newValue) { + _globalPosition = newValue; + _globalPositionChanged = usecTimestampNow(); } - return buffer.size(); - } - setLocalPosition(position); + sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition); + int numBytesRead = 
sourceBuffer - startSection; + _globalPositionRate.increment(numBytesRead); - float pitch, yaw, roll; - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 0, &yaw); - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 1, &pitch); - unpackFloatAngleFromTwoByte(avatarInfo->localOrientation + 2, &roll); - if (isNaN(yaw) || isNaN(pitch) || isNaN(roll)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: localOriention is NaN, uuid " << getSessionUUID(); + // if we don't have a parent, make sure to also set our local position + if (!hasParent()) { + setLocalPosition(newValue); } - return buffer.size(); } - glm::quat currentOrientation = getLocalOrientation(); - glm::vec3 newEulerAngles(pitch, yaw, roll); - glm::quat newOrientation = glm::quat(glm::radians(newEulerAngles)); - if (currentOrientation != newOrientation) { - _hasNewJointData = true; - setLocalOrientation(newOrientation); - } + if (hasAvatarBoundingBox) { + auto startSection = sourceBuffer; - float scale; - unpackFloatRatioFromTwoByte((uint8_t*)&avatarInfo->scale, scale); - if (isNaN(scale)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: scale NaN, uuid " << getSessionUUID(); + PACKET_READ_CHECK(AvatarBoundingBox, sizeof(AvatarDataPacket::AvatarBoundingBox)); + auto data = reinterpret_cast(sourceBuffer); + auto newDimensions = glm::vec3(data->avatarDimensions[0], data->avatarDimensions[1], data->avatarDimensions[2]); + auto newOffset = glm::vec3(data->boundOriginOffset[0], data->boundOriginOffset[1], data->boundOriginOffset[2]); + + + if (_globalBoundingBoxDimensions != newDimensions) { + _globalBoundingBoxDimensions = newDimensions; + _avatarBoundingBoxChanged = usecTimestampNow(); } - return buffer.size(); - } - setTargetScale(scale); - - glm::vec3 lookAt = glm::vec3(avatarInfo->lookAtPosition[0], avatarInfo->lookAtPosition[1], avatarInfo->lookAtPosition[2]); - if (isNaN(lookAt)) { - if (shouldLogError(now)) { - 
qCWarning(avatars) << "Discard AvatarData packet: lookAtPosition is NaN, uuid " << getSessionUUID(); + if (_globalBoundingBoxOffset != newOffset) { + _globalBoundingBoxOffset = newOffset; + _avatarBoundingBoxChanged = usecTimestampNow(); } - return buffer.size(); - } - _headData->_lookAtPosition = lookAt; - float audioLoudness = avatarInfo->audioLoudness; - if (isNaN(audioLoudness)) { - if (shouldLogError(now)) { - qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID(); + sourceBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox); + int numBytesRead = sourceBuffer - startSection; + _avatarBoundingBoxRate.increment(numBytesRead); + } + + if (hasAvatarOrientation) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(AvatarOrientation, sizeof(AvatarDataPacket::AvatarOrientation)); + glm::quat newOrientation; + sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, newOrientation); + glm::quat currentOrientation = getLocalOrientation(); + + if (currentOrientation != newOrientation) { + _hasNewJointData = true; + setLocalOrientation(newOrientation); } - return buffer.size(); + int numBytesRead = sourceBuffer - startSection; + _avatarOrientationRate.increment(numBytesRead); } - _headData->_audioLoudness = audioLoudness; - glm::quat sensorToWorldQuat; - unpackOrientationQuatFromSixBytes(avatarInfo->sensorToWorldQuat, sensorToWorldQuat); - float sensorToWorldScale; - unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&avatarInfo->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX); - glm::vec3 sensorToWorldTrans(avatarInfo->sensorToWorldTrans[0], avatarInfo->sensorToWorldTrans[1], avatarInfo->sensorToWorldTrans[2]); - glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); + if (hasAvatarScale) { + auto startSection = sourceBuffer; - _sensorToWorldMatrixCache.set(sensorToWorldMatrix); + PACKET_READ_CHECK(AvatarScale, 
sizeof(AvatarDataPacket::AvatarScale)); + auto data = reinterpret_cast(sourceBuffer); + float scale; + unpackFloatRatioFromTwoByte((uint8_t*)&data->scale, scale); + if (isNaN(scale)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: scale NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + setTargetScale(scale); + sourceBuffer += sizeof(AvatarDataPacket::AvatarScale); + int numBytesRead = sourceBuffer - startSection; + _avatarScaleRate.increment(numBytesRead); + } - { // bitFlags and face data - uint8_t bitItems = avatarInfo->flags; + if (hasLookAtPosition) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(LookAtPosition, sizeof(AvatarDataPacket::LookAtPosition)); + auto data = reinterpret_cast(sourceBuffer); + glm::vec3 lookAt = glm::vec3(data->lookAtPosition[0], data->lookAtPosition[1], data->lookAtPosition[2]); + if (isNaN(lookAt)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: lookAtPosition is NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + _headData->setLookAtPosition(lookAt); + sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition); + int numBytesRead = sourceBuffer - startSection; + _lookAtPositionRate.increment(numBytesRead); + } + + if (hasAudioLoudness) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(AudioLoudness, sizeof(AvatarDataPacket::AudioLoudness)); + auto data = reinterpret_cast(sourceBuffer); + float audioLoudness; + audioLoudness = unpackFloatGainFromByte(data->audioLoudness) * AUDIO_LOUDNESS_SCALE; + sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness); + + if (isNaN(audioLoudness)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: audioLoudness is NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + _headData->setAudioLoudness(audioLoudness); + int numBytesRead = sourceBuffer - startSection; + _audioLoudnessRate.increment(numBytesRead); + } + + if 
(hasSensorToWorldMatrix) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(SensorToWorldMatrix, sizeof(AvatarDataPacket::SensorToWorldMatrix)); + auto data = reinterpret_cast(sourceBuffer); + glm::quat sensorToWorldQuat; + unpackOrientationQuatFromSixBytes(data->sensorToWorldQuat, sensorToWorldQuat); + float sensorToWorldScale; + unpackFloatScalarFromSignedTwoByteFixed((int16_t*)&data->sensorToWorldScale, &sensorToWorldScale, SENSOR_TO_WORLD_SCALE_RADIX); + glm::vec3 sensorToWorldTrans(data->sensorToWorldTrans[0], data->sensorToWorldTrans[1], data->sensorToWorldTrans[2]); + glm::mat4 sensorToWorldMatrix = createMatFromScaleQuatAndPos(glm::vec3(sensorToWorldScale), sensorToWorldQuat, sensorToWorldTrans); + if (_sensorToWorldMatrixCache.get() != sensorToWorldMatrix) { + _sensorToWorldMatrixCache.set(sensorToWorldMatrix); + _sensorToWorldMatrixChanged = usecTimestampNow(); + } + sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix); + int numBytesRead = sourceBuffer - startSection; + _sensorToWorldRate.increment(numBytesRead); + } + + if (hasAdditionalFlags) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(AdditionalFlags, sizeof(AvatarDataPacket::AdditionalFlags)); + auto data = reinterpret_cast(sourceBuffer); + uint8_t bitItems = data->flags; // key state, stored as a semi-nibble in the bitItems - _keyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); + auto newKeyState = (KeyState)getSemiNibbleAt(bitItems, KEY_STATE_START_BIT); // hand state, stored as a semi-nibble plus a bit in the bitItems // we store the hand state as well as other items in a shared bitset. 
The hand state is an octal, but is split @@ -601,136 +809,230 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) { // |x,x|H0,H1|x,x,x|H2| // +---+-----+-----+--+ // Hand state - H0,H1,H2 is found in the 3rd, 4th, and 8th bits - _handState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + auto newHandState = getSemiNibbleAt(bitItems, HAND_STATE_START_BIT) + (oneAtBit(bitItems, HAND_STATE_FINGER_POINTING_BIT) ? IS_FINGER_POINTING_FLAG : 0); - _headData->_isFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); - _headData->_isEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); - bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL); + auto newFaceTrackerConnected = oneAtBit(bitItems, IS_FACESHIFT_CONNECTED); + auto newEyeTrackerConnected = oneAtBit(bitItems, IS_EYE_TRACKER_CONNECTED); - if (hasReferential) { - PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo)); - auto parentInfo = reinterpret_cast(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); + bool keyStateChanged = (_keyState != newKeyState); + bool handStateChanged = (_handState != newHandState); + bool faceStateChanged = (_headData->_isFaceTrackerConnected != newFaceTrackerConnected); + bool eyeStateChanged = (_headData->_isEyeTrackerConnected != newEyeTrackerConnected); + bool somethingChanged = keyStateChanged || handStateChanged || faceStateChanged || eyeStateChanged; - QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); - _parentID = QUuid::fromRfc4122(byteArray); + _keyState = newKeyState; + _handState = newHandState; + _headData->_isFaceTrackerConnected = newFaceTrackerConnected; + _headData->_isEyeTrackerConnected = newEyeTrackerConnected; + + sourceBuffer += sizeof(AvatarDataPacket::AdditionalFlags); + + if (somethingChanged) { + _additionalFlagsChanged = usecTimestampNow(); + } + int numBytesRead = sourceBuffer - startSection; + _additionalFlagsRate.increment(numBytesRead); + } + + // 
FIXME -- make sure to handle the existance of a parent vs a change in the parent... + //bool hasReferential = oneAtBit(bitItems, HAS_REFERENTIAL); + if (hasParentInfo) { + auto startSection = sourceBuffer; + PACKET_READ_CHECK(ParentInfo, sizeof(AvatarDataPacket::ParentInfo)); + auto parentInfo = reinterpret_cast(sourceBuffer); + sourceBuffer += sizeof(AvatarDataPacket::ParentInfo); + + QByteArray byteArray((const char*)parentInfo->parentUUID, NUM_BYTES_RFC4122_UUID); + + auto newParentID = QUuid::fromRfc4122(byteArray); + + if ((_parentID != newParentID) || (_parentJointIndex = parentInfo->parentJointIndex)) { + _parentID = newParentID; _parentJointIndex = parentInfo->parentJointIndex; - } else { - _parentID = QUuid(); + _parentChanged = usecTimestampNow(); } - if (_headData->_isFaceTrackerConnected) { - PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo)); - auto faceTrackerInfo = reinterpret_cast(sourceBuffer); - sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); - - _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink; - _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink; - _headData->_averageLoudness = faceTrackerInfo->averageLoudness; - _headData->_browAudioLift = faceTrackerInfo->browAudioLift; - - int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients; - const int coefficientsSize = sizeof(float) * numCoefficients; - PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize); - _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! 
- memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); - sourceBuffer += coefficientsSize; - } + int numBytesRead = sourceBuffer - startSection; + _parentInfoRate.increment(numBytesRead); + } else { + // FIXME - this aint totally right, for switching to parent/no-parent + _parentID = QUuid(); } - PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); - int numJoints = *sourceBuffer++; + if (hasAvatarLocalPosition) { + assert(hasParent()); // we shouldn't have local position unless we have a parent + auto startSection = sourceBuffer; - const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); - PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); + PACKET_READ_CHECK(AvatarLocalPosition, sizeof(AvatarDataPacket::AvatarLocalPosition)); + auto data = reinterpret_cast(sourceBuffer); + glm::vec3 position = glm::vec3(data->localPosition[0], data->localPosition[1], data->localPosition[2]); + if (isNaN(position)) { + if (shouldLogError(now)) { + qCWarning(avatars) << "Discard AvatarData packet: position NaN, uuid " << getSessionUUID(); + } + return buffer.size(); + } + setLocalPosition(position); + sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition); + int numBytesRead = sourceBuffer - startSection; + _localPositionRate.increment(numBytesRead); + } - int numValidJointRotations = 0; - QVector validRotations; - validRotations.resize(numJoints); - { // rotation validity bits - unsigned char validity = 0; - int validityBit = 0; + if (hasFaceTrackerInfo) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(FaceTrackerInfo, sizeof(AvatarDataPacket::FaceTrackerInfo)); + auto faceTrackerInfo = reinterpret_cast(sourceBuffer); + sourceBuffer += sizeof(AvatarDataPacket::FaceTrackerInfo); + + _headData->_leftEyeBlink = faceTrackerInfo->leftEyeBlink; + _headData->_rightEyeBlink = faceTrackerInfo->rightEyeBlink; + _headData->_averageLoudness = faceTrackerInfo->averageLoudness; + _headData->_browAudioLift = 
faceTrackerInfo->browAudioLift; + + int numCoefficients = faceTrackerInfo->numBlendshapeCoefficients; + const int coefficientsSize = sizeof(float) * numCoefficients; + PACKET_READ_CHECK(FaceTrackerCoefficients, coefficientsSize); + _headData->_blendshapeCoefficients.resize(numCoefficients); // make sure there's room for the copy! + memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize); + sourceBuffer += coefficientsSize; + int numBytesRead = sourceBuffer - startSection; + _faceTrackerRate.increment(numBytesRead); + } + + if (hasJointData) { + auto startSection = sourceBuffer; + + PACKET_READ_CHECK(NumJoints, sizeof(uint8_t)); + int numJoints = *sourceBuffer++; + const int bytesOfValidity = (int)ceil((float)numJoints / (float)BITS_IN_BYTE); + PACKET_READ_CHECK(JointRotationValidityBits, bytesOfValidity); + + int numValidJointRotations = 0; + QVector validRotations; + validRotations.resize(numJoints); + { // rotation validity bits + unsigned char validity = 0; + int validityBit = 0; + for (int i = 0; i < numJoints; i++) { + if (validityBit == 0) { + validity = *sourceBuffer++; + } + bool valid = (bool)(validity & (1 << validityBit)); + if (valid) { + ++numValidJointRotations; + } + validRotations[i] = valid; + validityBit = (validityBit + 1) % BITS_IN_BYTE; + } + } + + // each joint rotation is stored in 6 bytes. 
+ QWriteLocker writeLock(&_jointDataLock); + _jointData.resize(numJoints); + + const int COMPRESSED_QUATERNION_SIZE = 6; + PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE); for (int i = 0; i < numJoints; i++) { - if (validityBit == 0) { - validity = *sourceBuffer++; + JointData& data = _jointData[i]; + if (validRotations[i]) { + sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation); + _hasNewJointData = true; + data.rotationSet = true; } - bool valid = (bool)(validity & (1 << validityBit)); - if (valid) { - ++numValidJointRotations; + } + + PACKET_READ_CHECK(JointTranslationValidityBits, bytesOfValidity); + + // get translation validity bits -- these indicate which translations were packed + int numValidJointTranslations = 0; + QVector validTranslations; + validTranslations.resize(numJoints); + { // translation validity bits + unsigned char validity = 0; + int validityBit = 0; + for (int i = 0; i < numJoints; i++) { + if (validityBit == 0) { + validity = *sourceBuffer++; + } + bool valid = (bool)(validity & (1 << validityBit)); + if (valid) { + ++numValidJointTranslations; + } + validTranslations[i] = valid; + validityBit = (validityBit + 1) % BITS_IN_BYTE; } - validRotations[i] = valid; - validityBit = (validityBit + 1) % BITS_IN_BYTE; - } - } + } // 1 + bytesOfValidity bytes - // each joint rotation is stored in 6 bytes. - QWriteLocker writeLock(&_jointDataLock); - _jointData.resize(numJoints); + // each joint translation component is stored in 6 bytes. 
+ const int COMPRESSED_TRANSLATION_SIZE = 6; + PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE); - const int COMPRESSED_QUATERNION_SIZE = 6; - PACKET_READ_CHECK(JointRotations, numValidJointRotations * COMPRESSED_QUATERNION_SIZE); - for (int i = 0; i < numJoints; i++) { - JointData& data = _jointData[i]; - if (validRotations[i]) { - sourceBuffer += unpackOrientationQuatFromSixBytes(sourceBuffer, data.rotation); - _hasNewJointData = true; - data.rotationSet = true; - } - } - - PACKET_READ_CHECK(JointTranslationValidityBits, bytesOfValidity); - - // get translation validity bits -- these indicate which translations were packed - int numValidJointTranslations = 0; - QVector validTranslations; - validTranslations.resize(numJoints); - { // translation validity bits - unsigned char validity = 0; - int validityBit = 0; for (int i = 0; i < numJoints; i++) { - if (validityBit == 0) { - validity = *sourceBuffer++; + JointData& data = _jointData[i]; + if (validTranslations[i]) { + sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); + _hasNewJointData = true; + data.translationSet = true; } - bool valid = (bool)(validity & (1 << validityBit)); - if (valid) { - ++numValidJointTranslations; - } - validTranslations[i] = valid; - validityBit = (validityBit + 1) % BITS_IN_BYTE; } - } // 1 + bytesOfValidity bytes - // each joint translation component is stored in 6 bytes. 
- const int COMPRESSED_TRANSLATION_SIZE = 6; - PACKET_READ_CHECK(JointTranslation, numValidJointTranslations * COMPRESSED_TRANSLATION_SIZE); - - for (int i = 0; i < numJoints; i++) { - JointData& data = _jointData[i]; - if (validTranslations[i]) { - sourceBuffer += unpackFloatVec3FromSignedTwoByteFixed(sourceBuffer, data.translation, TRANSLATION_COMPRESSION_RADIX); - _hasNewJointData = true; - data.translationSet = true; +#ifdef WANT_DEBUG + if (numValidJointRotations > 15) { + qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations + << "translations:" << numValidJointTranslations + << "size:" << (int)(sourceBuffer - startPosition); } - } +#endif + // faux joints + sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); + sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); - #ifdef WANT_DEBUG - if (numValidJointRotations > 15) { - qCDebug(avatars) << "RECEIVING -- rotations:" << numValidJointRotations - << "translations:" << numValidJointTranslations - << "size:" << (int)(sourceBuffer - startPosition); + int numBytesRead = sourceBuffer - startSection; + _jointDataRate.increment(numBytesRead); } - #endif - - // faux joints - sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerLeftHandMatrixCache); - sourceBuffer = unpackFauxJoint(sourceBuffer, _controllerRightHandMatrixCache); int numBytesRead = sourceBuffer - startPosition; _averageBytesReceived.updateAverage(numBytesRead); + + _parseBufferRate.increment(numBytesRead); + return numBytesRead; } +float AvatarData::getDataRate(const QString& rateName) { + if (rateName == "") { + return _parseBufferRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "globalPosition") { + return _globalPositionRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "localPosition") { + return _localPositionRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "avatarBoundingBox") { + return _avatarBoundingBoxRate.rate() / BYTES_PER_KILOBIT; + } else if 
(rateName == "avatarOrientation") { + return _avatarOrientationRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "avatarScale") { + return _avatarScaleRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "lookAtPosition") { + return _lookAtPositionRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "audioLoudness") { + return _audioLoudnessRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "sensorToWorkMatrix") { + return _sensorToWorldRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "additionalFlags") { + return _additionalFlagsRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "parentInfo") { + return _parentInfoRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "faceTracker") { + return _faceTrackerRate.rate() / BYTES_PER_KILOBIT; + } else if (rateName == "jointData") { + return _jointDataRate.rate() / BYTES_PER_KILOBIT; + } + return 0.0f; +} + + int AvatarData::getAverageBytesReceivedPerSecond() const { return lrint(_averageBytesReceived.getAverageSampleValuePerSecond()); } @@ -1175,6 +1477,7 @@ void AvatarData::detachAll(const QString& modelURL, const QString& jointName) { } void AvatarData::setJointMappingsFromNetworkReply() { + QNetworkReply* networkReply = static_cast(sender()); { @@ -1222,9 +1525,17 @@ void AvatarData::sendAvatarDataPacket() { // about 2% of the time, we send a full update (meaning, we transmit all the joint data), even if nothing has changed. // this is to guard against a joint moving once, the packet getting lost, and the joint never moving again. - QByteArray avatarByteArray = toByteArray((randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? SendAllData : CullSmallData); - doneEncoding(true); // FIXME - doneEncoding() takes a bool for culling small changes, that's janky! + bool cullSmallData = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO); + auto dataDetail = cullSmallData ? 
SendAllData : CullSmallData; + QVector lastSentJointData; + { + QReadLocker readLock(&_jointDataLock); + _lastSentJointData.resize(_jointData.size()); + lastSentJointData = _lastSentJointData; + } + QByteArray avatarByteArray = toByteArray(dataDetail, 0, lastSentJointData); + doneEncoding(cullSmallData); static AvatarDataSequenceNumber sequenceNumber = 0; diff --git a/libraries/avatars/src/AvatarData.h b/libraries/avatars/src/AvatarData.h index f07d9a2ba6..5604e41f63 100644 --- a/libraries/avatars/src/AvatarData.h +++ b/libraries/avatars/src/AvatarData.h @@ -56,6 +56,7 @@ typedef unsigned long long quint64; #include #include #include +#include #include "AABox.h" #include "HeadData.h" @@ -99,6 +100,7 @@ const int IS_EYE_TRACKER_CONNECTED = 5; // 6th bit (was CHAT_CIRCLING) const int HAS_REFERENTIAL = 6; // 7th bit const int HAND_STATE_FINGER_POINTING_BIT = 7; // 8th bit + const char HAND_STATE_NULL = 0; const char LEFT_HAND_POINTING_FLAG = 1; const char RIGHT_HAND_POINTING_FLAG = 2; @@ -108,6 +110,131 @@ const char IS_FINGER_POINTING_FLAG = 4; // before the "header" structure const char AVATARDATA_FLAGS_MINIMUM = 0; +using SmallFloat = uint16_t; // a compressed float with less precision, user defined radix + +namespace AvatarDataPacket { + + // NOTE: every time AvatarData is sent from mixer to client, it also includes the GUIID for the session + // this is 16bytes of data at 45hz that's 5.76kbps + // it might be nice to use a dictionary to compress that + + // Packet State Flags - we store the details about the existence of other records in this bitset: + // AvatarGlobalPosition, Avatar Faceshift, eye tracking, and existence of + using HasFlags = uint16_t; + const HasFlags PACKET_HAS_AVATAR_GLOBAL_POSITION = 1U << 0; + const HasFlags PACKET_HAS_AVATAR_BOUNDING_BOX = 1U << 1; + const HasFlags PACKET_HAS_AVATAR_ORIENTATION = 1U << 2; + const HasFlags PACKET_HAS_AVATAR_SCALE = 1U << 3; + const HasFlags PACKET_HAS_LOOK_AT_POSITION = 1U << 4; + const HasFlags 
PACKET_HAS_AUDIO_LOUDNESS = 1U << 5; + const HasFlags PACKET_HAS_SENSOR_TO_WORLD_MATRIX = 1U << 6; + const HasFlags PACKET_HAS_ADDITIONAL_FLAGS = 1U << 7; + const HasFlags PACKET_HAS_PARENT_INFO = 1U << 8; + const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 9; + const HasFlags PACKET_HAS_FACE_TRACKER_INFO = 1U << 10; + const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11; + + // NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure. + + PACKED_BEGIN struct Header { + HasFlags packetHasFlags; // state flags, indicated which additional records are included in the packet + } PACKED_END; + const size_t HEADER_SIZE = 2; + + PACKED_BEGIN struct AvatarGlobalPosition { + float globalPosition[3]; // avatar's position + } PACKED_END; + const size_t AVATAR_GLOBAL_POSITION_SIZE = 12; + + PACKED_BEGIN struct AvatarBoundingBox { + float avatarDimensions[3]; // avatar's bounding box in world space units, but relative to the position. + float boundOriginOffset[3]; // offset from the position of the avatar to the origin of the bounding box + } PACKED_END; + const size_t AVATAR_BOUNDING_BOX_SIZE = 24; + + + using SixByteQuat = uint8_t[6]; + PACKED_BEGIN struct AvatarOrientation { + SixByteQuat avatarOrientation; // encodeded and compressed by packOrientationQuatToSixBytes() + } PACKED_END; + const size_t AVATAR_ORIENTATION_SIZE = 6; + + PACKED_BEGIN struct AvatarScale { + SmallFloat scale; // avatar's scale, compressed by packFloatRatioToTwoByte() + } PACKED_END; + const size_t AVATAR_SCALE_SIZE = 2; + + PACKED_BEGIN struct LookAtPosition { + float lookAtPosition[3]; // world space position that eyes are focusing on. + // FIXME - unless the person has an eye tracker, this is simulated... 
+ // a) maybe we can just have the client calculate this + // b) at distance this will be hard to discern and can likely be + // decimated or dropped completely + // + // POTENTIAL SAVINGS - 12 bytes + } PACKED_END; + const size_t LOOK_AT_POSITION_SIZE = 12; + + PACKED_BEGIN struct AudioLoudness { + uint8_t audioLoudness; // current loudness of microphone compressed with packFloatGainToByte() + } PACKED_END; + const size_t AUDIO_LOUDNESS_SIZE = 1; + + PACKED_BEGIN struct SensorToWorldMatrix { + // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments" + // we could save these bytes if no attachments are active. + // + // POTENTIAL SAVINGS - 20 bytes + + SixByteQuat sensorToWorldQuat; // 6 byte compressed quaternion part of sensor to world matrix + uint16_t sensorToWorldScale; // uniform scale of sensor to world matrix + float sensorToWorldTrans[3]; // fourth column of sensor to world matrix + // FIXME - sensorToWorldTrans might be able to be better compressed if it was + // relative to the avatar position. 
+ } PACKED_END; + const size_t SENSOR_TO_WORLD_SIZE = 20; + + PACKED_BEGIN struct AdditionalFlags { + uint8_t flags; // additional flags: hand state, key state, eye tracking + } PACKED_END; + const size_t ADDITIONAL_FLAGS_SIZE = 1; + + // only present if HAS_REFERENTIAL flag is set in AvatarInfo.flags + PACKED_BEGIN struct ParentInfo { + uint8_t parentUUID[16]; // rfc 4122 encoded + uint16_t parentJointIndex; + } PACKED_END; + const size_t PARENT_INFO_SIZE = 18; + + // will only ever be included if the avatar has a parent but can change independent of changes to parent info + // and so we keep it a separate record + PACKED_BEGIN struct AvatarLocalPosition { + float localPosition[3]; // parent frame translation of the avatar + } PACKED_END; + const size_t AVATAR_LOCAL_POSITION_SIZE = 12; + + // only present if IS_FACESHIFT_CONNECTED flag is set in AvatarInfo.flags + PACKED_BEGIN struct FaceTrackerInfo { + float leftEyeBlink; + float rightEyeBlink; + float averageLoudness; + float browAudioLift; + uint8_t numBlendshapeCoefficients; + // float blendshapeCoefficients[numBlendshapeCoefficients]; + } PACKED_END; + const size_t FACE_TRACKER_INFO_SIZE = 17; + + // variable length structure follows + /* + struct JointData { + uint8_t numJoints; + uint8_t rotationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed rotation follows. + SixByteQuat rotation[numValidRotations]; // encoded and compressed by packOrientationQuatToSixBytes() + uint8_t translationValidityBits[ceil(numJoints / 8)]; // one bit per joint, if true then a compressed translation follows. 
+ SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed() + }; + */ +} static const float MAX_AVATAR_SCALE = 1000.0f; static const float MIN_AVATAR_SCALE = .005f; @@ -125,6 +252,16 @@ const float AVATAR_SEND_FULL_UPDATE_RATIO = 0.02f; const float AVATAR_MIN_ROTATION_DOT = 0.9999999f; const float AVATAR_MIN_TRANSLATION = 0.0001f; +const float ROTATION_CHANGE_15D = 0.9914449f; +const float ROTATION_CHANGE_45D = 0.9238795f; +const float ROTATION_CHANGE_90D = 0.7071068f; +const float ROTATION_CHANGE_179D = 0.0087266f; + +const float AVATAR_DISTANCE_LEVEL_1 = 10.0f; +const float AVATAR_DISTANCE_LEVEL_2 = 100.0f; +const float AVATAR_DISTANCE_LEVEL_3 = 1000.0f; +const float AVATAR_DISTANCE_LEVEL_4 = 10000.0f; + // Where one's own Avatar begins in the world (will be overwritten if avatar data file is found). // This is the start location in the Sandbox (xyz: 6270, 211, 6000). @@ -214,7 +351,9 @@ public: SendAllData } AvatarDataDetail; - virtual QByteArray toByteArray(AvatarDataDetail dataDetail); + virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector& lastSentJointData, + bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector* sentJointDataOut = nullptr); + virtual void doneEncoding(bool cullSmallChanges); /// \return true if an error should be logged @@ -265,10 +404,11 @@ public: virtual void setTargetScale(float targetScale); float getDomainLimitedScale() const { return glm::clamp(_targetScale, _domainMinimumScale, _domainMaximumScale); } + void setDomainMinimumScale(float domainMinimumScale) - { _domainMinimumScale = glm::clamp(domainMinimumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); } - void setDomainMaximumScale(float domainMaximumScale) - { _domainMaximumScale = glm::clamp(domainMaximumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); } + { _domainMinimumScale = glm::clamp(domainMinimumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); _scaleChanged = 
usecTimestampNow(); } + void setDomainMaximumScale(float domainMaximumScale) + { _domainMaximumScale = glm::clamp(domainMaximumScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE); _scaleChanged = usecTimestampNow(); } // Hand State Q_INVOKABLE void setHandState(char s) { _handState = s; } @@ -375,7 +515,7 @@ public: void fromJson(const QJsonObject& json); glm::vec3 getClientGlobalPosition() { return _globalPosition; } - glm::vec3 getGlobalBoundingBoxCorner() { return _globalBoundingBoxCorner; } + glm::vec3 getGlobalBoundingBoxCorner() { return _globalPosition + _globalBoundingBoxOffset - _globalBoundingBoxDimensions; } Q_INVOKABLE AvatarEntityMap getAvatarEntityData() const; Q_INVOKABLE void setAvatarEntityData(const AvatarEntityMap& avatarEntityData); @@ -387,6 +527,17 @@ public: Q_INVOKABLE glm::mat4 getControllerLeftHandMatrix() const; Q_INVOKABLE glm::mat4 getControllerRightHandMatrix() const; + float getDataRate(const QString& rateName = QString("")); + + int getJointCount() { return _jointData.size(); } + + QVector getLastSentJointData() { + QReadLocker readLock(&_jointDataLock); + _lastSentJointData.resize(_jointData.size()); + return _lastSentJointData; + } + + public slots: void sendAvatarDataPacket(); void sendIdentityPacket(); @@ -401,7 +552,27 @@ public slots: float getTargetScale() { return _targetScale; } + void resetLastSent() { _lastToByteArray = 0; } + protected: + void lazyInitHeadData(); + + float getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition); + float getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition); + + bool avatarBoundingBoxChangedSince(quint64 time); + bool avatarScaleChangedSince(quint64 time); + bool lookAtPositionChangedSince(quint64 time); + bool audioLoudnessChangedSince(quint64 time); + bool sensorToWorldMatrixChangedSince(quint64 time); + bool additionalFlagsChangedSince(quint64 time); + + bool hasParent() { return !getParentID().isNull(); } + bool parentInfoChangedSince(quint64 time); + + bool hasFaceTracker() { 
return _headData ? _headData->_isFaceTrackerConnected : false; } + bool faceTrackerInfoChangedSince(quint64 time); + glm::vec3 _handPosition; virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; } virtual void maybeUpdateSessionDisplayNameFromTransport(const QString& sessionDisplayName) { } // No-op in AvatarMixer @@ -460,8 +631,35 @@ protected: // _globalPosition is sent along with localPosition + parent because the avatar-mixer doesn't know // where Entities are located. This is currently only used by the mixer to decide how often to send // updates about one avatar to another. - glm::vec3 _globalPosition; - glm::vec3 _globalBoundingBoxCorner; + glm::vec3 _globalPosition { 0, 0, 0 }; + + + quint64 _globalPositionChanged { 0 }; + quint64 _avatarBoundingBoxChanged { 0 }; + quint64 _avatarScaleChanged { 0 }; + quint64 _sensorToWorldMatrixChanged { 0 }; + quint64 _additionalFlagsChanged { 0 }; + quint64 _parentChanged { 0 }; + + quint64 _lastToByteArray { 0 }; // tracks the last time we did a toByteArray + + // Some rate data for incoming data + RateCounter<> _parseBufferRate; + RateCounter<> _globalPositionRate; + RateCounter<> _localPositionRate; + RateCounter<> _avatarBoundingBoxRate; + RateCounter<> _avatarOrientationRate; + RateCounter<> _avatarScaleRate; + RateCounter<> _lookAtPositionRate; + RateCounter<> _audioLoudnessRate; + RateCounter<> _sensorToWorldRate; + RateCounter<> _additionalFlagsRate; + RateCounter<> _parentInfoRate; + RateCounter<> _faceTrackerRate; + RateCounter<> _jointDataRate; + + glm::vec3 _globalBoundingBoxDimensions; + glm::vec3 _globalBoundingBoxOffset; mutable ReadWriteLockable _avatarEntitiesLock; AvatarEntityIDs _avatarEntityDetached; // recently detached from this avatar diff --git a/libraries/avatars/src/HeadData.h b/libraries/avatars/src/HeadData.h index af657339ba..cbf6c6bb32 100644 --- a/libraries/avatars/src/HeadData.h +++ b/libraries/avatars/src/HeadData.h @@ -19,6 +19,8 @@ #include 
#include +#include + // degrees const float MIN_HEAD_YAW = -180.0f; const float MAX_HEAD_YAW = 180.0f; @@ -56,7 +58,13 @@ public: void setOrientation(const glm::quat& orientation); float getAudioLoudness() const { return _audioLoudness; } - void setAudioLoudness(float audioLoudness) { _audioLoudness = audioLoudness; } + void setAudioLoudness(float audioLoudness) { + if (audioLoudness != _audioLoudness) { + _audioLoudnessChanged = usecTimestampNow(); + } + _audioLoudness = audioLoudness; + } + bool audioLoudnessChangedSince(quint64 time) { return _audioLoudnessChanged >= time; } float getAudioAverageLoudness() const { return _audioAverageLoudness; } void setAudioAverageLoudness(float audioAverageLoudness) { _audioAverageLoudness = audioAverageLoudness; } @@ -66,7 +74,13 @@ public: void setBlendshapeCoefficients(const QVector& blendshapeCoefficients) { _blendshapeCoefficients = blendshapeCoefficients; } const glm::vec3& getLookAtPosition() const { return _lookAtPosition; } - void setLookAtPosition(const glm::vec3& lookAtPosition) { _lookAtPosition = lookAtPosition; } + void setLookAtPosition(const glm::vec3& lookAtPosition) { + if (_lookAtPosition != lookAtPosition) { + _lookAtPositionChanged = usecTimestampNow(); + } + _lookAtPosition = lookAtPosition; + } + bool lookAtPositionChangedSince(quint64 time) { return _lookAtPositionChanged >= time; } friend class AvatarData; @@ -80,7 +94,11 @@ protected: float _baseRoll; glm::vec3 _lookAtPosition; + quint64 _lookAtPositionChanged { 0 }; + float _audioLoudness; + quint64 _audioLoudnessChanged { 0 }; + bool _isFaceTrackerConnected; bool _isEyeTrackerConnected; float _leftEyeBlink; diff --git a/libraries/networking/src/udt/PacketHeaders.cpp b/libraries/networking/src/udt/PacketHeaders.cpp index 89f30829fd..7cb02010f8 100644 --- a/libraries/networking/src/udt/PacketHeaders.cpp +++ b/libraries/networking/src/udt/PacketHeaders.cpp @@ -55,7 +55,7 @@ PacketVersion versionForPacketType(PacketType packetType) { case 
PacketType::AvatarData: case PacketType::BulkAvatarData: case PacketType::KillAvatar: - return static_cast(AvatarMixerPacketVersion::Unignore); + return static_cast(AvatarMixerPacketVersion::VariableAvatarData); case PacketType::ICEServerHeartbeat: return 18; // ICE Server Heartbeat signing case PacketType::AssetGetInfo: diff --git a/libraries/networking/src/udt/PacketHeaders.h b/libraries/networking/src/udt/PacketHeaders.h index 23fbbff431..d695bde62a 100644 --- a/libraries/networking/src/udt/PacketHeaders.h +++ b/libraries/networking/src/udt/PacketHeaders.h @@ -220,7 +220,8 @@ enum class AvatarMixerPacketVersion : PacketVersion { HasKillAvatarReason, SessionDisplayName, Unignore, - ImmediateSessionDisplayNameUpdates + ImmediateSessionDisplayNameUpdates, + VariableAvatarData }; enum class DomainConnectRequestVersion : PacketVersion { diff --git a/libraries/shared/src/SpatiallyNestable.cpp b/libraries/shared/src/SpatiallyNestable.cpp index 71e3e850cc..7ecb0f7409 100644 --- a/libraries/shared/src/SpatiallyNestable.cpp +++ b/libraries/shared/src/SpatiallyNestable.cpp @@ -26,6 +26,9 @@ SpatiallyNestable::SpatiallyNestable(NestableType nestableType, QUuid id) : // set flags in _transform _transform.setTranslation(glm::vec3(0.0f)); _transform.setRotation(glm::quat()); + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } SpatiallyNestable::~SpatiallyNestable() { @@ -399,6 +402,7 @@ void SpatiallyNestable::setPosition(const glm::vec3& position, bool& success, bo changed = true; myWorldTransform.setTranslation(position); Transform::inverseMult(_transform, parentTransform, myWorldTransform); + _translationChanged = usecTimestampNow(); } }); if (success && changed) { @@ -451,6 +455,7 @@ void SpatiallyNestable::setOrientation(const glm::quat& orientation, bool& succe changed = true; myWorldTransform.setRotation(orientation); Transform::inverseMult(_transform, parentTransform, myWorldTransform); + 
_rotationChanged = usecTimestampNow(); } }); if (success && changed) { @@ -649,6 +654,8 @@ void SpatiallyNestable::setTransform(const Transform& transform, bool& success) Transform::inverseMult(_transform, parentTransform, transform); if (_transform != beforeTransform) { changed = true; + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); if (success && changed) { @@ -689,6 +696,7 @@ void SpatiallyNestable::setScale(const glm::vec3& scale) { if (_transform.getScale() != scale) { _transform.setScale(scale); changed = true; + _scaleChanged = usecTimestampNow(); } }); if (changed) { @@ -710,6 +718,7 @@ void SpatiallyNestable::setScale(float value) { _transform.setScale(value); if (_transform.getScale() != beforeScale) { changed = true; + _scaleChanged = usecTimestampNow(); } }); @@ -738,6 +747,9 @@ void SpatiallyNestable::setLocalTransform(const Transform& transform) { if (_transform != transform) { _transform = transform; changed = true; + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); @@ -765,6 +777,7 @@ void SpatiallyNestable::setLocalPosition(const glm::vec3& position, bool tellPhy if (_transform.getTranslation() != position) { _transform.setTranslation(position); changed = true; + _translationChanged = usecTimestampNow(); } }); if (changed) { @@ -791,6 +804,7 @@ void SpatiallyNestable::setLocalOrientation(const glm::quat& orientation) { if (_transform.getRotation() != orientation) { _transform.setRotation(orientation); changed = true; + _rotationChanged = usecTimestampNow(); } }); if (changed) { @@ -848,9 +862,12 @@ void SpatiallyNestable::setLocalScale(const glm::vec3& scale) { if (_transform.getScale() != scale) { _transform.setScale(scale); changed = true; + _scaleChanged = usecTimestampNow(); } }); - dimensionsChanged(); + if (changed) { + dimensionsChanged(); + } } QList SpatiallyNestable::getChildren() const { @@ -1059,6 +1076,9 @@ void 
SpatiallyNestable::setLocalTransformAndVelocities( if (_transform != localTransform) { _transform = localTransform; changed = true; + _scaleChanged = usecTimestampNow(); + _translationChanged = usecTimestampNow(); + _rotationChanged = usecTimestampNow(); } }); // linear velocity diff --git a/libraries/shared/src/SpatiallyNestable.h b/libraries/shared/src/SpatiallyNestable.h index 04ed14f72e..6f56a108bd 100644 --- a/libraries/shared/src/SpatiallyNestable.h +++ b/libraries/shared/src/SpatiallyNestable.h @@ -178,6 +178,10 @@ public: const glm::vec3& localVelocity, const glm::vec3& localAngularVelocity); + bool scaleChangedSince(quint64 time) { return _scaleChanged > time; } + bool tranlationChangedSince(quint64 time) { return _translationChanged > time; } + bool rotationChangedSince(quint64 time) { return _rotationChanged > time; } + protected: const NestableType _nestableType; // EntityItem or an AvatarData QUuid _id; @@ -201,6 +205,9 @@ protected: mutable bool _queryAACubeSet { false }; bool _missingAncestor { false }; + quint64 _scaleChanged { 0 }; + quint64 _translationChanged { 0 }; + quint64 _rotationChanged { 0 }; private: mutable ReadWriteLockable _transformLock; diff --git a/script-archive/acScripts/simpleBot.js b/script-archive/acScripts/simpleBot.js new file mode 100644 index 0000000000..a79e44484b --- /dev/null +++ b/script-archive/acScripts/simpleBot.js @@ -0,0 +1,63 @@ +// +// simpleBot.js +// examples +// +// Created by Brad Hefta-Gaub on 12/23/16. +// Copyright 2016 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. 
+// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// + +HIFI_PUBLIC_BUCKET = "http://s3.amazonaws.com/hifi-public/"; + +function getRandomFloat(min, max) { + return Math.random() * (max - min) + min; +} + +function getRandomInt (min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function printVector(string, vector) { + print(string + " " + vector.x + ", " + vector.y + ", " + vector.z); +} + +var timePassed = 0.0; +var updateSpeed = 3.0; + +var X_MIN = 5.0; +var X_MAX = 15.0; +var Z_MIN = 5.0; +var Z_MAX = 15.0; +var Y_PELVIS = 1.0; + +Agent.isAvatar = true; + +// change the avatar's position to the random one +Avatar.position = {x:0,y:1.1,z:0}; // { x: getRandomFloat(X_MIN, X_MAX), y: Y_PELVIS, z: getRandomFloat(Z_MIN, Z_MAX) };; +printVector("New bot, position = ", Avatar.position); + +var animationData = {url: "file:///D:/Development/HiFi/hifi/interface/resources/avatar/animations/walk_fwd.fbx", lastFrame: 35}; +//Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); +//Avatar.skeletonModelURL = "file:///D:/Development/HiFi/hifi/interface/resources/meshes/being_of_light/being_of_light.fbx"; + +var millisecondsToWaitBeforeStarting = 4 * 1000; +Script.setTimeout(function () { + print("Starting at", JSON.stringify(Avatar.position)); + Avatar.startAnimation(animationData.url, animationData.fps || 30, 1, true, false, animationData.firstFrame || 0, animationData.lastFrame); +}, millisecondsToWaitBeforeStarting); + + + +function update(deltaTime) { + timePassed += deltaTime; + if (timePassed > updateSpeed) { + timePassed = 0; + var newPosition = Vec3.sum(Avatar.position, { x: getRandomFloat(-0.1, 0.1), y: 0, z: getRandomFloat(-0.1, 0.1) }); + Avatar.position = newPosition; + Vec3.print("new:", newPosition); + } +} + +Script.update.connect(update); \ No newline at end of file diff --git 
a/scripts/developer/debugging/debugAvatarMixer.js b/scripts/developer/debugging/debugAvatarMixer.js new file mode 100644 index 0000000000..6c0a935b70 --- /dev/null +++ b/scripts/developer/debugging/debugAvatarMixer.js @@ -0,0 +1,130 @@ +"use strict"; + +// +// debugAvatarMixer.js +// scripts/developer/debugging +// +// Created by Brad Hefta-Gaub on 01/09/2017 +// Copyright 2017 High Fidelity, Inc. +// +// Distributed under the Apache License, Version 2.0. +// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html +// +/* global Toolbars, Script, Users, Overlays, AvatarList, Controller, Camera, getControllerWorldLocation */ + + +(function() { // BEGIN LOCAL_SCOPE + +Script.include("/~/system/libraries/controllers.js"); + +var isShowingOverlays = true; +var debugOverlays = {}; + +function removeOverlays() { + // enumerate the overlays and remove them + var overlayKeys = Object.keys(debugOverlays); + + for (var i = 0; i < overlayKeys.length; ++i) { + var avatarID = overlayKeys[i]; + for (var j = 0; j < debugOverlays[avatarID].length; ++j) { + Overlays.deleteOverlay(debugOverlays[avatarID][j]); + } + } + + debugOverlays = {}; +} + +function updateOverlays() { + if (isShowingOverlays) { + + var identifiers = AvatarList.getAvatarIdentifiers(); + + for (var i = 0; i < identifiers.length; ++i) { + var avatarID = identifiers[i]; + + if (avatarID === null) { + // this is our avatar, skip it + continue; + } + + // get the position for this avatar + var avatar = AvatarList.getAvatar(avatarID); + var avatarPosition = avatar && avatar.position; + + if (!avatarPosition) { + // we don't have a valid position for this avatar, skip it + continue; + } + + // setup a position for the overlay that is just above this avatar's head + var overlayPosition = avatar.getJointPosition("Head"); + overlayPosition.y += 1.05; + + var text = " All: " + AvatarManager.getAvatarDataRate(avatarID).toFixed(2) + "\n" + +" GP: " + 
AvatarManager.getAvatarDataRate(avatarID,"globalPosition").toFixed(2) + "\n" + +" LP: " + AvatarManager.getAvatarDataRate(avatarID,"localPosition").toFixed(2) + "\n" + +" BB: " + AvatarManager.getAvatarDataRate(avatarID,"avatarBoundingBox").toFixed(2) + "\n" + +" AO: " + AvatarManager.getAvatarDataRate(avatarID,"avatarOrientation").toFixed(2) + "\n" + +" AS: " + AvatarManager.getAvatarDataRate(avatarID,"avatarScale").toFixed(2) + "\n" + +" LA: " + AvatarManager.getAvatarDataRate(avatarID,"lookAtPosition").toFixed(2) + "\n" + +" AL: " + AvatarManager.getAvatarDataRate(avatarID,"audioLoudness").toFixed(2) + "\n" + +" SW: " + AvatarManager.getAvatarDataRate(avatarID,"sensorToWorkMatrix").toFixed(2) + "\n" + +" AF: " + AvatarManager.getAvatarDataRate(avatarID,"additionalFlags").toFixed(2) + "\n" + +" PI: " + AvatarManager.getAvatarDataRate(avatarID,"parentInfo").toFixed(2) + "\n" + +" FT: " + AvatarManager.getAvatarDataRate(avatarID,"faceTracker").toFixed(2) + "\n" + +" JD: " + AvatarManager.getAvatarDataRate(avatarID,"jointData").toFixed(2); + + if (avatarID in debugOverlays) { + // keep the overlay above the current position of this avatar + Overlays.editOverlay(debugOverlays[avatarID][0], { + position: overlayPosition, + text: text + }); + } else { + // add the overlay above this avatar + var newOverlay = Overlays.addOverlay("text3d", { + position: overlayPosition, + dimensions: { + x: 1, + y: 13 * 0.13 + }, + lineHeight: 0.1, + font:{size:0.1}, + text: text, + size: 1, + scale: 0.4, + color: { red: 255, green: 255, blue: 255}, + alpha: 1, + solid: true, + isFacingAvatar: true, + drawInFront: true + }); + + debugOverlays[avatarID]=[newOverlay]; + } + } + } +} + +Script.update.connect(updateOverlays); + +AvatarList.avatarRemovedEvent.connect(function(avatarID){ + if (isShowingOverlays) { + // we are currently showing overlays and an avatar just went away + + // first remove the rendered overlays + for (var j = 0; j < debugOverlays[avatarID].length; ++j) { + 
Overlays.deleteOverlay(debugOverlays[avatarID][j]); + } + + // delete the saved ID of the overlay from our mod overlays object + delete debugOverlays[avatarID]; + } +}); + +// cleanup the toolbar button and overlays when script is stopped +Script.scriptEnding.connect(function() { + removeOverlays(); +}); + +}()); // END LOCAL_SCOPE