Mirror of https://github.com/overte-org/overte.git (synced 2025-04-20 12:04:18 +02:00)

Commit 13198fd949 (parent 636ade9eb7): add datarate

4 changed files with 79 additions and 67 deletions

@@ -132,6 +132,11 @@ void AvatarManager::updateMyAvatar(float deltaTime) {

Q_LOGGING_CATEGORY(trace_simulation_avatar, "trace.simulation.avatar");

float AvatarManager::getAvatarDataRate(const QUuid& sessionID, const QString& rateName) {
    auto avatar = getAvatarBySessionID(sessionID);
    return avatar->getDataRate(rateName);
}

void AvatarManager::updateOtherAvatars(float deltaTime) {
    // lock the hash for read to check the size
    QReadLocker lock(&_hashLock);

@@ -71,6 +71,7 @@ public:

    void addAvatarToSimulation(Avatar* avatar);

    Q_INVOKABLE float getAvatarDataRate(const QUuid& sessionID, const QString& rateName = QString(""));
    Q_INVOKABLE RayToAvatarIntersectionResult findRayIntersection(const PickRay& ray,
                                                                   const QScriptValue& avatarIdsToInclude = QScriptValue(),
                                                                   const QScriptValue& avatarIdsToDiscard = QScriptValue());

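Taken together, the two hunks above expose per-avatar receive rates to callers: getAvatarDataRate() looks the avatar up by session ID and forwards the rate name to AvatarData::getDataRate() (shown further down in this diff). A minimal usage sketch, assuming the caller already holds an AvatarManager pointer; the helper function below and its logging are illustrative only, not part of this commit:

// Hypothetical caller (not part of this commit): log two of the new rates for one avatar.
// Assumes AvatarManager.h and its types are available on the include path.
#include <QDebug>
#include <QUuid>

void logAvatarRates(AvatarManager* avatarManager, const QUuid& sessionID) {
    // An empty rate name falls through to the overall parse-buffer rate;
    // "jointData" selects the joint-data counter (see getDataRate() below).
    float overall = avatarManager->getAvatarDataRate(sessionID);
    float jointData = avatarManager->getAvatarDataRate(sessionID, "jointData");
    qDebug() << "avatar" << sessionID << "overall:" << overall << "jointData:" << jointData;
}
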
@@ -89,10 +89,6 @@ AvatarData::AvatarData() :

    ASSERT(sizeof(AvatarDataPacket::AdditionalFlags) == AvatarDataPacket::ADDITIONAL_FLAGS_SIZE);
    ASSERT(sizeof(AvatarDataPacket::ParentInfo) == AvatarDataPacket::PARENT_INFO_SIZE);
    ASSERT(sizeof(AvatarDataPacket::FaceTrackerInfo) == AvatarDataPacket::FACE_TRACKER_INFO_SIZE);

    // Old format...
    ASSERT(sizeof(AvatarDataPacket::AvatarInfo) == AvatarDataPacket::AVATAR_INFO_SIZE);
}

AvatarData::~AvatarData() {

@@ -682,6 +678,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

    AvatarDataPacket::HasFlags packetStateFlags;

    _parseBufferRate.increment();

    const unsigned char* startPosition = reinterpret_cast<const unsigned char*>(buffer.data());
    const unsigned char* endPosition = startPosition + buffer.size();
    const unsigned char* sourceBuffer = startPosition;

@@ -720,6 +718,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        }
        sourceBuffer += sizeof(AvatarDataPacket::AvatarGlobalPosition);
        //qDebug() << "hasAvatarGlobalPosition _globalPosition:" << _globalPosition;

        _globalPositionRate.increment();
    }

    if (hasAvatarLocalPosition) {

@@ -735,6 +735,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        setLocalPosition(position);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition);
        //qDebug() << "hasAvatarLocalPosition position:" << position;

        _localPositionRate.increment();
    }

    if (hasAvatarDimensions) {

@@ -748,6 +750,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        }
        sourceBuffer += sizeof(AvatarDataPacket::AvatarDimensions);
        //qDebug() << "hasAvatarDimensions _globalBoundingBoxCorner:" << _globalBoundingBoxCorner;

        _avatarDimensionRate.increment();
    }

    if (hasAvatarOrientation) {

@@ -773,6 +777,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        }
        sourceBuffer += sizeof(AvatarDataPacket::AvatarOrientation);
        //qDebug() << "hasAvatarOrientation newOrientation:" << newOrientation;

        _avatarOrientationRate.increment();
    }

    if (hasAvatarScale) {

@@ -789,6 +795,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        setTargetScale(scale);
        sourceBuffer += sizeof(AvatarDataPacket::AvatarScale);
        //qDebug() << "hasAvatarOrientation scale:" << scale;

        _avatarScaleRate.increment();
    }

    if (hasLookAtPosition) {

@@ -804,6 +812,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        _headData->setLookAtPosition(lookAt);
        sourceBuffer += sizeof(AvatarDataPacket::LookAtPosition);
        //qDebug() << "hasLookAtPosition lookAt:" << lookAt;

        _lookAtPositionRate.increment();
    }

    if (hasAudioLoudness) {

@@ -821,6 +831,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        _headData->setAudioLoudness(audioLoudness);
        sourceBuffer += sizeof(AvatarDataPacket::AudioLoudness);
        //qDebug() << "hasAudioLoudness audioLoudness:" << audioLoudness;

        _audioLoudnessRate.increment();
    }

    if (hasSensorToWorldMatrix) {

@@ -838,6 +850,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        }
        sourceBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix);
        //qDebug() << "hasSensorToWorldMatrix sensorToWorldMatrix:" << sensorToWorldMatrix;

        _sensorToWorldRate.increment();
    }

    if (hasAdditionalFlags) {

@@ -878,6 +892,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        if (somethingChanged) {
            _additionalFlagsChanged = usecTimestampNow();
        }
        _additionalFlagsRate.increment();
    }

    // FIXME -- make sure to handle the existance of a parent vs a change in the parent...

@@ -897,6 +912,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

            _parentChanged = usecTimestampNow();
        }

        _parentInfoRate.increment();
    } else {
        // FIXME - this aint totally right, for switching to parent/no-parent
        _parentID = QUuid();

@@ -919,6 +935,8 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        memcpy(_headData->_blendshapeCoefficients.data(), sourceBuffer, coefficientsSize);
        sourceBuffer += coefficientsSize;
        //qDebug() << "hasFaceTrackerInfo numCoefficients:" << numCoefficients;

        _faceTrackerRate.increment();
    }

    if (hasJointData) {

@@ -1011,6 +1029,7 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

        //qDebug() << "hasJointData numValidJointRotations:" << numValidJointRotations << "numValidJointTranslations:" << numValidJointTranslations;

        _jointDataRate.increment();
    }

    int numBytesRead = sourceBuffer - startPosition;

@@ -1018,6 +1037,38 @@ int AvatarData::parseDataFromBuffer(const QByteArray& buffer) {

    return numBytesRead;
}

float AvatarData::getDataRate(const QString& rateName) {
    if (rateName == "") {
        return _parseBufferRate.rate();
    } else if (rateName == "globalPosition") {
        return _globalPositionRate.rate();
    } else if (rateName == "localPosition") {
        return _localPositionRate.rate();
    } else if (rateName == "avatarDimensions") {
        return _avatarDimensionRate.rate();
    } else if (rateName == "avatarOrientation") {
        return _avatarOrientationRate.rate();
    } else if (rateName == "avatarScale") {
        return _avatarScaleRate.rate();
    } else if (rateName == "lookAtPosition") {
        return _lookAtPositionRate.rate();
    } else if (rateName == "audioLoudness") {
        return _audioLoudnessRate.rate();
    } else if (rateName == "sensorToWorkMatrix") {
        return _sensorToWorldRate.rate();
    } else if (rateName == "additionalFlags") {
        return _additionalFlagsRate.rate();
    } else if (rateName == "parentInfo") {
        return _parentInfoRate.rate();
    } else if (rateName == "faceTracker") {
        return _faceTrackerRate.rate();
    } else if (rateName == "jointData") {
        return _jointDataRate.rate();
    }
    return 0.0f;
}


int AvatarData::getAverageBytesReceivedPerSecond() const {
    return lrint(_averageBytesReceived.getAverageSampleValuePerSecond());
}

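The .rate() calls above, and the .increment() calls sprinkled through parseDataFromBuffer(), rely on the RateCounter<> template pulled in via the new shared/RateCounter.h include (see the header hunk below). That file is not part of this diff; the following is only a rough sketch of the interface the code appears to depend on, with the window length, member names, and rolling-window strategy all assumed for illustration:

// Illustrative stand-in for shared/RateCounter.h (assumed interface, not the real code):
// increment() counts events, rate() reports events per second over the last completed window.
#include <chrono>
#include <cstddef>

template<int WINDOW_MSECS = 1000>
class SimpleRateCounter {
public:
    void increment(std::size_t count = 1) {
        auto now = std::chrono::steady_clock::now();
        auto elapsedMs = std::chrono::duration_cast<std::chrono::milliseconds>(now - _windowStart).count();
        if (elapsedMs >= WINDOW_MSECS) {
            // Window elapsed: publish the rate for the window just finished and start a new one.
            _rate = static_cast<float>(_count) * 1000.0f / static_cast<float>(elapsedMs);
            _count = 0;
            _windowStart = now;
        }
        _count += count;
    }

    float rate() const { return _rate; }   // events per second; 0 until the first window completes

private:
    std::chrono::steady_clock::time_point _windowStart { std::chrono::steady_clock::now() };
    std::size_t _count { 0 };
    float _rate { 0.0f };
};

Under that shape, each parsed section bumps its counter once per packet, and getDataRate() simply maps the script-facing rate names onto the matching counter's events-per-second reading.
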
@@ -56,6 +56,7 @@ typedef unsigned long long quint64;

#include <Packed.h>
#include <ThreadSafeValueCache.h>
#include <SharedUtil.h>
#include <shared/RateCounter.h>

#include "AABox.h"
#include "HeadData.h"

@@ -250,66 +251,6 @@ namespace AvatarDataPacket {

        SixByteTrans translation[numValidTranslations]; // encodeded and compressed by packFloatVec3ToSignedTwoByteFixed()
    };
    */

    // OLD FORMAT....
    PACKED_BEGIN struct AvatarInfo {
        // FIXME - this has 8 unqiue items, we could use a simple header byte to indicate whether or not the fields
        // exist in the packet and have changed since last being sent.
        float globalPosition[3];          // avatar's position
                                          // FIXME - possible savings:
                                          //    a) could be encoded as relative to last known position, most movements
                                          //       will be withing a smaller radix
                                          //    b) would still need an intermittent absolute value.

        float position[3];                // skeletal model's position
                                          // FIXME - this used to account for a registration offset from the avatar's position
                                          // to the position of the skeletal model/mesh. This relative offset doesn't change from
                                          // frame to frame, instead only changes when the model changes, it could be moved to the
                                          // identity packet and/or only included when it changes.
                                          // if it's encoded relative to the globalPosition, it could be reduced to a smaller radix
                                          //
                                          // POTENTIAL SAVINGS - 12 bytes

        float globalBoundingBoxCorner[3]; // global position of the lowest corner of the avatar's bounding box
                                          // FIXME - this would change less frequently if it was the dimensions of the bounding box
                                          // instead of the corner.
                                          //
                                          // POTENTIAL SAVINGS - 12 bytes

        uint16_t localOrientation[3];     // avatar's local euler angles (degrees, compressed) relative to the thing it's attached to
        uint16_t scale;                   // (compressed) 'ratio' encoding uses sign bit as flag.
                                          // FIXME - this doesn't change every frame
                                          //
                                          // POTENTIAL SAVINGS - 2 bytes

        float lookAtPosition[3];          // world space position that eyes are focusing on.
                                          // FIXME - unless the person has an eye tracker, this is simulated...
                                          //    a) maybe we can just have the client calculate this
                                          //    b) at distance this will be hard to discern and can likely be
                                          //       descimated or dropped completely
                                          //
                                          // POTENTIAL SAVINGS - 12 bytes

        uint16_t audioLoudness;           // current loundess of microphone
                                          // FIXME -
                                          //    a) this could probably be decimated with a smaller radix <<< DONE
                                          //    b) this doesn't change every frame
                                          //
                                          // POTENTIAL SAVINGS - 4-2 bytes

        // FIXME - these 20 bytes are only used by viewers if my avatar has "attachments"
        // we could save these bytes if no attachments are active.
        //
        // POTENTIAL SAVINGS - 20 bytes

        uint8_t sensorToWorldQuat[6];     // 6 byte compressed quaternion part of sensor to world matrix
        uint16_t sensorToWorldScale;      // uniform scale of sensor to world matrix
        float sensorToWorldTrans[3];      // fourth column of sensor to world matrix
                                          // FIXME - sensorToWorldTrans might be able to be better compressed if it was
                                          // relative to the avatar position.
        uint8_t flags;
    } PACKED_END;
    const size_t AVATAR_INFO_SIZE = 79;
}

static const float MAX_AVATAR_SCALE = 1000.0f;

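As a quick sanity check on the struct being removed, the packed field sizes do add up to the stated constant: 12 (globalPosition) + 12 (position) + 12 (globalBoundingBoxCorner) + 6 (localOrientation) + 2 (scale) + 12 (lookAtPosition) + 2 (audioLoudness) + 6 (sensorToWorldQuat) + 2 (sensorToWorldScale) + 12 (sensorToWorldTrans) + 1 (flags) = 79 bytes, matching AVATAR_INFO_SIZE.
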
@@ -594,6 +535,8 @@ public:

    Q_INVOKABLE glm::mat4 getControllerLeftHandMatrix() const;
    Q_INVOKABLE glm::mat4 getControllerRightHandMatrix() const;

    float getDataRate(const QString& rateName = QString(""));

public slots:
    void sendAvatarDataPacket();
    void sendIdentityPacket();

@@ -696,7 +639,21 @@ protected:

    quint64 _parentChanged { 0 };

    quint64 _lastToByteArray { 0 }; // tracks the last time we did a toByteArray

    // Some rate data for incoming data
    RateCounter<> _parseBufferRate;
    RateCounter<> _globalPositionRate;
    RateCounter<> _localPositionRate;
    RateCounter<> _avatarDimensionRate;
    RateCounter<> _avatarOrientationRate;
    RateCounter<> _avatarScaleRate;
    RateCounter<> _lookAtPositionRate;
    RateCounter<> _audioLoudnessRate;
    RateCounter<> _sensorToWorldRate;
    RateCounter<> _additionalFlagsRate;
    RateCounter<> _parentInfoRate;
    RateCounter<> _faceTrackerRate;
    RateCounter<> _jointDataRate;

    glm::vec3 _globalBoundingBoxCorner;

@@ -713,8 +670,6 @@ protected:

    int getFauxJointIndex(const QString& name) const;

    AvatarDataPacket::AvatarInfo _lastAvatarInfo;

private:
    friend void avatarStateFromFrame(const QByteArray& frameData, AvatarData* _avatar);
    static QUrl _defaultFullAvatarModelUrl;