Merge remote-tracking branch 'upstream/master' into smarter_textures

Brad Davis 2017-02-23 17:22:03 -08:00
commit 61e341db75
29 changed files with 620 additions and 417 deletions

View file

@ -47,7 +47,7 @@ AvatarMixer::AvatarMixer(ReceivedMessage& message) :
auto& packetReceiver = DependencyManager::get<NodeList>()->getPacketReceiver();
packetReceiver.registerListener(PacketType::AvatarData, this, "queueIncomingPacket");
packetReceiver.registerListener(PacketType::AdjustAvatarSorting, this, "handleAdjustAvatarSorting");
packetReceiver.registerListener(PacketType::ViewFrustum, this, "handleViewFrustumPacket");
packetReceiver.registerListener(PacketType::AvatarIdentity, this, "handleAvatarIdentityPacket");
packetReceiver.registerListener(PacketType::KillAvatar, this, "handleKillAvatarPacket");
@ -309,7 +309,7 @@ void AvatarMixer::nodeKilled(SharedNodePointer killedNode) {
},
[&](const SharedNodePointer& node) {
QMetaObject::invokeMethod(node->getLinkedData(),
"removeLastBroadcastSequenceNumber",
"cleanupKilledNode",
Qt::AutoConnection,
Q_ARG(const QUuid&, QUuid(killedNode->getUUID())));
}
@ -317,6 +317,27 @@ void AvatarMixer::nodeKilled(SharedNodePointer killedNode) {
}
}
void AvatarMixer::handleAdjustAvatarSorting(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode) {
auto start = usecTimestampNow();
// only allow admins with kick rights to change this value...
if (senderNode->getCanKick()) {
message->readPrimitive(&AvatarData::_avatarSortCoefficientSize);
message->readPrimitive(&AvatarData::_avatarSortCoefficientCenter);
message->readPrimitive(&AvatarData::_avatarSortCoefficientAge);
qCDebug(avatars) << "New avatar sorting... "
<< "size:" << AvatarData::_avatarSortCoefficientSize
<< "center:" << AvatarData::_avatarSortCoefficientCenter
<< "age:" << AvatarData::_avatarSortCoefficientAge;
}
auto end = usecTimestampNow();
_handleAdjustAvatarSortingElapsedTime += (end - start);
}
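The payload parsed here is three consecutive floats, read in the order size, center, age. The matching sender lands later in this same commit (AvatarManager::setAvatarSortCoefficient); a minimal sketch of that write side, assuming 4-byte floats:
auto packet = NLPacket::create(PacketType::AdjustAvatarSorting, 3 * sizeof(float));
packet->writePrimitive(AvatarData::_avatarSortCoefficientSize); // "size"
packet->writePrimitive(AvatarData::_avatarSortCoefficientCenter); // "center"
packet->writePrimitive(AvatarData::_avatarSortCoefficientAge); // "age"
DependencyManager::get<NodeList>()->broadcastToNodes(std::move(packet), NodeSet() << NodeType::AvatarMixer);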
void AvatarMixer::handleViewFrustumPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode) {
auto start = usecTimestampNow();
getOrCreateClientData(senderNode);
@ -485,6 +506,9 @@ void AvatarMixer::sendStatsPacket() {
float averageOthersIncluded = averageNodes ? stats.numOthersIncluded / averageNodes : 0.0f;
slaveObject["sent_6_averageOthersIncluded"] = TIGHT_LOOP_STAT(averageOthersIncluded);
float averageOverBudgetAvatars = averageNodes ? stats.overBudgetAvatars / averageNodes : 0.0f;
slaveObject["sent_7_averageOverBudgetAvatars"] = TIGHT_LOOP_STAT(averageOverBudgetAvatars);
slaveObject["timing_1_processIncomingPackets"] = TIGHT_LOOP_STAT_UINT64(stats.processIncomingPacketsElapsedTime);
slaveObject["timing_2_ignoreCalculation"] = TIGHT_LOOP_STAT_UINT64(stats.ignoreCalculationElapsedTime);
slaveObject["timing_3_toByteArray"] = TIGHT_LOOP_STAT_UINT64(stats.toByteArrayElapsedTime);
@ -514,7 +538,10 @@ void AvatarMixer::sendStatsPacket() {
float averageOthersIncluded = averageNodes ? aggregateStats.numOthersIncluded / averageNodes : 0.0f;
slavesAggregatObject["sent_6_averageOthersIncluded"] = TIGHT_LOOP_STAT(averageOthersIncluded);
float averageOverBudgetAvatars = averageNodes ? aggregateStats.overBudgetAvatars / averageNodes : 0.0f;
slavesAggregatObject["sent_7_averageOverBudgetAvatars"] = TIGHT_LOOP_STAT(averageOverBudgetAvatars);
slavesAggregatObject["timing_1_processIncomingPackets"] = TIGHT_LOOP_STAT_UINT64(aggregateStats.processIncomingPacketsElapsedTime);
slavesAggregatObject["timing_2_ignoreCalculation"] = TIGHT_LOOP_STAT_UINT64(aggregateStats.ignoreCalculationElapsedTime);
slavesAggregatObject["timing_3_toByteArray"] = TIGHT_LOOP_STAT_UINT64(aggregateStats.toByteArrayElapsedTime);

View file

@ -39,6 +39,7 @@ public slots:
private slots:
void queueIncomingPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer node);
void handleAdjustAvatarSorting(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleViewFrustumPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleAvatarIdentityPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleKillAvatarPacket(QSharedPointer<ReceivedMessage> message);
@ -92,6 +93,7 @@ private:
quint64 _broadcastAvatarDataNodeTransform { 0 };
quint64 _broadcastAvatarDataNodeFunctor { 0 };
quint64 _handleAdjustAvatarSortingElapsedTime { 0 };
quint64 _handleViewFrustumPacketElapsedTime { 0 };
quint64 _handleAvatarIdentityPacketElapsedTime { 0 };
quint64 _handleKillAvatarPacketElapsedTime { 0 };

View file

@ -74,14 +74,22 @@ bool AvatarMixerClientData::checkAndSetHasReceivedFirstPacketsFrom(const QUuid&
return true;
}
uint64_t AvatarMixerClientData::getLastBroadcastTime(const QUuid& nodeUUID) const {
// return the matching last broadcast time, or 0 if we don't have it
auto nodeMatch = _lastBroadcastTimes.find(nodeUUID);
if (nodeMatch != _lastBroadcastTimes.end()) {
return nodeMatch->second;
}
return 0;
}
uint16_t AvatarMixerClientData::getLastBroadcastSequenceNumber(const QUuid& nodeUUID) const {
// return the matching PacketSequenceNumber, or the default if we don't have it
auto nodeMatch = _lastBroadcastSequenceNumbers.find(nodeUUID);
if (nodeMatch != _lastBroadcastSequenceNumbers.end()) {
return nodeMatch->second;
} else {
return 0;
}
return 0;
}
void AvatarMixerClientData::ignoreOther(SharedNodePointer self, SharedNodePointer other) {
@ -118,8 +126,6 @@ bool AvatarMixerClientData::otherAvatarInView(const AABox& otherAvatarBox) {
void AvatarMixerClientData::loadJSONStats(QJsonObject& jsonObject) const {
jsonObject["display_name"] = _avatar->getDisplayName();
jsonObject["full_rate_distance"] = _fullRateDistance;
jsonObject["max_av_distance"] = _maxAvatarDistance;
jsonObject["num_avs_sent_last_frame"] = _numAvatarsSentLastFrame;
jsonObject["avg_other_av_starves_per_second"] = getAvgNumOtherAvatarStarvesPerSecond();
jsonObject["avg_other_av_skips_per_second"] = getAvgNumOtherAvatarSkipsPerSecond();

View file

@ -43,6 +43,7 @@ public:
int parseData(ReceivedMessage& message) override;
AvatarData& getAvatar() { return *_avatar; }
const AvatarData* getConstAvatarData() const { return _avatar.get(); }
AvatarSharedPointer getAvatarSharedPointer() const { return _avatar; }
bool checkAndSetHasReceivedFirstPacketsFrom(const QUuid& uuid);
@ -51,6 +52,15 @@ public:
{ _lastBroadcastSequenceNumbers[nodeUUID] = sequenceNumber; }
Q_INVOKABLE void removeLastBroadcastSequenceNumber(const QUuid& nodeUUID) { _lastBroadcastSequenceNumbers.erase(nodeUUID); }
uint64_t getLastBroadcastTime(const QUuid& nodeUUID) const;
void setLastBroadcastTime(const QUuid& nodeUUID, uint64_t broadcastTime) { _lastBroadcastTimes[nodeUUID] = broadcastTime; }
Q_INVOKABLE void removeLastBroadcastTime(const QUuid& nodeUUID) { _lastBroadcastTimes.erase(nodeUUID); }
Q_INVOKABLE void cleanupKilledNode(const QUuid& nodeUUID) {
removeLastBroadcastSequenceNumber(nodeUUID);
removeLastBroadcastTime(nodeUUID);
}
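// NOTE: cleanupKilledNode is the new QMetaObject::invokeMethod target used by
// AvatarMixer::nodeKilled in the first hunk above; Q_INVOKABLE makes it callable
// by name, and Qt::AutoConnection queues the call onto the owning thread when
// the caller is on a different one, so both erase() calls stay thread-safe.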
uint16_t getLastReceivedSequenceNumber() const { return _lastReceivedSequenceNumber; }
HRCTime getIdentityChangeTimestamp() const { return _identityChangeTimestamp; }
@ -58,12 +68,6 @@ public:
bool getAvatarSessionDisplayNameMustChange() const { return _avatarSessionDisplayNameMustChange; }
void setAvatarSessionDisplayNameMustChange(bool set = true) { _avatarSessionDisplayNameMustChange = set; }
void setFullRateDistance(float fullRateDistance) { _fullRateDistance = fullRateDistance; }
float getFullRateDistance() const { return _fullRateDistance; }
void setMaxAvatarDistance(float maxAvatarDistance) { _maxAvatarDistance = maxAvatarDistance; }
float getMaxAvatarDistance() const { return _maxAvatarDistance; }
void resetNumAvatarsSentLastFrame() { _numAvatarsSentLastFrame = 0; }
void incrementNumAvatarsSentLastFrame() { ++_numAvatarsSentLastFrame; }
int getNumAvatarsSentLastFrame() const { return _numAvatarsSentLastFrame; }
@ -106,6 +110,8 @@ public:
bool getRequestsDomainListData() { return _requestsDomainListData; }
void setRequestsDomainListData(bool requesting) { _requestsDomainListData = requesting; }
ViewFrustum getViewFrustom() const { return _currentViewFrustum; }
quint64 getLastOtherAvatarEncodeTime(QUuid otherAvatar) {
quint64 result = 0;
if (_lastOtherAvatarEncodeTime.find(otherAvatar) != _lastOtherAvatarEncodeTime.end()) {
@ -134,6 +140,7 @@ private:
uint16_t _lastReceivedSequenceNumber { 0 };
std::unordered_map<QUuid, uint16_t> _lastBroadcastSequenceNumbers;
std::unordered_set<QUuid> _hasReceivedFirstPacketsFrom;
std::unordered_map<QUuid, uint64_t> _lastBroadcastTimes;
// this is a map of the last time we encoded an "other" avatar for
// sending to "this" node
@ -143,9 +150,6 @@ private:
HRCTime _identityChangeTimestamp;
bool _avatarSessionDisplayNameMustChange{ false };
float _fullRateDistance = FLT_MAX;
float _maxAvatarDistance = FLT_MAX;
int _numAvatarsSentLastFrame = 0;
int _numFramesSinceAdjustment = 0;

View file

@ -66,24 +66,20 @@ void AvatarMixerSlave::processIncomingPackets(const SharedNodePointer& node) {
}
void AvatarMixerSlave::sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode) {
int AvatarMixerSlave::sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode) {
int bytesSent = 0;
QByteArray individualData = nodeData->getConstAvatarData()->identityByteArray();
auto identityPacket = NLPacket::create(PacketType::AvatarIdentity, individualData.size());
individualData.replace(0, NUM_BYTES_RFC4122_UUID, nodeData->getNodeID().toRfc4122());
individualData.replace(0, NUM_BYTES_RFC4122_UUID, nodeData->getNodeID().toRfc4122()); // FIXME, this looks suspicious
bytesSent += individualData.size();
identityPacket->write(individualData);
DependencyManager::get<NodeList>()->sendPacket(std::move(identityPacket), *destinationNode);
_stats.numIdentityPackets++;
return bytesSent;
}
static const int AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND = 45;
// only send extra avatar data (avatars out of view, ignored) every Nth AvatarData frame
// Extra avatar data will be sent (AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND/EXTRA_AVATAR_DATA_FRAME_RATIO) times
// per second.
// This value should be a power of two for performance purposes, as the mixer performs a modulo operation every frame
// to determine whether the extra data should be sent.
static const int EXTRA_AVATAR_DATA_FRAME_RATIO = 16;
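Given these constants, an out-of-view avatar is refreshed only when the receiver's last-broadcast sequence number for it is a multiple of EXTRA_AVATAR_DATA_FRAME_RATIO (the modulo test shows up in broadcastAvatarData below), i.e. roughly 45 / 16 ≈ 2.8 extra updates per second. A hypothetical helper illustrating the test:
bool shouldSendExtraData(uint16_t lastSeqToReceiver) {
return (lastSeqToReceiver % EXTRA_AVATAR_DATA_FRAME_RATIO) == 0;
}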
// FIXME - There is some old logic (unchanged as of 2/17/17) that randomly decides to send an identity
// packet. That logic had the following comment about the constants it uses...
//
@ -117,9 +113,6 @@ void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
// reset the internal state for correct random number distribution
distribution.reset();
// reset the max distance for this frame
float maxAvatarDistanceThisFrame = 0.0f;
// reset the number of sent avatars
nodeData->resetNumAvatarsSentLastFrame();
@ -128,6 +121,15 @@ void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
// keep track of outbound data rate specifically for avatar data
int numAvatarDataBytes = 0;
int identityBytesSent = 0;
// max number of avatarBytes per frame
auto maxAvatarBytesPerFrame = (_maxKbpsPerNode * BYTES_PER_KILOBIT) / AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND;
// FIXME - find a way to not send the sessionID for every avatar
int minimumBytesPerAvatar = AvatarDataPacket::AVATAR_HAS_FLAGS_SIZE + NUM_BYTES_RFC4122_UUID;
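Worked numbers for this budget (illustrative; assumes BYTES_PER_KILOBIT is 125, i.e. 1000 bits / 8): with a hypothetical _maxKbpsPerNode of 5000, maxAvatarBytesPerFrame = (5000 * 125) / 45 ≈ 13,888 bytes, while minimumBytesPerAvatar = 2 (AVATAR_HAS_FLAGS_SIZE) + 16 (NUM_BYTES_RFC4122_UUID) = 18 bytes.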
int overBudgetAvatars = 0;
// keep track of the number of other avatars held back in this frame
int numAvatarsHeldBack = 0;
@ -135,9 +137,6 @@ void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
// keep track of the number of other avatar frames skipped
int numAvatarsWithSkippedFrames = 0;
// use the data rate specifically for avatar data for FRD adjustment checks
float avatarDataRateLastSecond = nodeData->getOutboundAvatarDataKbps();
// When this is true, the AvatarMixer will send Avatar data to a client about avatars that are not in the view frustum
bool getsOutOfView = nodeData->getRequestsDomainListData();
@ -147,267 +146,264 @@ void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
// When this is true, the AvatarMixer will send Avatar data to a client about avatars that have ignored them
bool getsAnyIgnored = getsIgnoredByMe && node->getCanKick();
// Check if it is time to adjust what we send this client based on the observed
// bandwidth to this node. We do this once a second, which is also the window for
// the bandwidth reported by node->getOutboundBandwidth();
if (nodeData->getNumFramesSinceFRDAdjustment() > AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND) {
const float FRD_ADJUSTMENT_ACCEPTABLE_RATIO = 0.8f;
const float HYSTERISIS_GAP = (1 - FRD_ADJUSTMENT_ACCEPTABLE_RATIO);
const float HYSTERISIS_MIDDLE_PERCENTAGE = (1 - (HYSTERISIS_GAP * 0.5f));
// get the current full rate distance so we can work with it
float currentFullRateDistance = nodeData->getFullRateDistance();
if (avatarDataRateLastSecond > _maxKbpsPerNode) {
// is the FRD greater than the farthest avatar?
// if so, before we calculate anything, set it to that distance
currentFullRateDistance = std::min(currentFullRateDistance, nodeData->getMaxAvatarDistance());
// we're adjusting the full rate distance to target a bandwidth in the middle
// of the hysteresis gap
currentFullRateDistance *= (_maxKbpsPerNode * HYSTERISIS_MIDDLE_PERCENTAGE) / avatarDataRateLastSecond;
nodeData->setFullRateDistance(currentFullRateDistance);
nodeData->resetNumFramesSinceFRDAdjustment();
} else if (currentFullRateDistance < nodeData->getMaxAvatarDistance()
&& avatarDataRateLastSecond < _maxKbpsPerNode * FRD_ADJUSTMENT_ACCEPTABLE_RATIO) {
// we are constrained AND we've recovered to below the acceptable ratio
// let's adjust the full rate distance to target a bandwidth in the middle of the hysteresis gap
currentFullRateDistance *= (_maxKbpsPerNode * HYSTERISIS_MIDDLE_PERCENTAGE) / avatarDataRateLastSecond;
nodeData->setFullRateDistance(currentFullRateDistance);
nodeData->resetNumFramesSinceFRDAdjustment();
}
} else {
nodeData->incrementNumFramesSinceFRDAdjustment();
}
// setup a PacketList for the avatarPackets
auto avatarPacketList = NLPacketList::create(PacketType::BulkAvatarData);
// this is an AGENT we have received head data from
// send back a packet with other active node data to this node
// Define the minimum bubble size
static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
// Define the scale of the box for the current node
glm::vec3 nodeBoxScale = (nodeData->getPosition() - nodeData->getGlobalBoundingBoxCorner()) * 2.0f;
// Set up the bounding box for the current node
AABox nodeBox(nodeData->getGlobalBoundingBoxCorner(), nodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(nodeBoxScale, minBubbleSize))) {
nodeBox.setScaleStayCentered(minBubbleSize);
}
// Quadruple the scale of both bounding boxes
nodeBox.embiggen(4.0f);
// set up a list of AvatarData as well as maps to map between the AvatarData and the original nodes
// for calling the AvatarData::sortAvatars() function and getting our sorted list of client nodes
QList<AvatarSharedPointer> avatarList;
std::unordered_map<AvatarSharedPointer, SharedNodePointer> avatarDataToNodes;
int listItem = 0;
std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode) {
const AvatarMixerClientData* otherNodeData = reinterpret_cast<const AvatarMixerClientData*>(otherNode->getLinkedData());
bool shouldConsider = false;
quint64 startIgnoreCalculation = usecTimestampNow();
// make sure we have data for this avatar, that it isn't the same node,
// and isn't an avatar that the viewing node has ignored
// or that has ignored the viewing node
if (!otherNode->getLinkedData()
|| otherNode->getUUID() == node->getUUID()
|| (node->isIgnoringNodeWithID(otherNode->getUUID()) && !getsIgnoredByMe)
|| (otherNode->isIgnoringNodeWithID(node->getUUID()) && !getsAnyIgnored)) {
shouldConsider = false;
} else {
const AvatarMixerClientData* otherData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
shouldConsider = true; // assume we will consider...
// Check to see if the space bubble is enabled
if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
// Define the minimum bubble size
static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
// Define the scale of the box for the current node
glm::vec3 nodeBoxScale = (nodeData->getPosition() - nodeData->getGlobalBoundingBoxCorner()) * 2.0f;
// Define the scale of the box for the current other node
glm::vec3 otherNodeBoxScale = (otherData->getPosition() - otherData->getGlobalBoundingBoxCorner()) * 2.0f;
// Set up the bounding box for the current node
AABox nodeBox(nodeData->getGlobalBoundingBoxCorner(), nodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(nodeBoxScale, minBubbleSize))) {
nodeBox.setScaleStayCentered(minBubbleSize);
}
// Set up the bounding box for the current other node
AABox otherNodeBox(otherData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(otherNodeBoxScale, minBubbleSize))) {
otherNodeBox.setScaleStayCentered(minBubbleSize);
}
// Quadruple the scale of both bounding boxes
nodeBox.embiggen(4.0f);
otherNodeBox.embiggen(4.0f);
// Perform the collision check between the two bounding boxes
if (nodeBox.touches(otherNodeBox)) {
nodeData->ignoreOther(node, otherNode);
shouldConsider = getsAnyIgnored;
}
}
// Not close enough to ignore
if (shouldConsider) {
nodeData->removeFromRadiusIgnoringSet(node, otherNode->getUUID());
}
quint64 endIgnoreCalculation = usecTimestampNow();
_stats.ignoreCalculationElapsedTime += (endIgnoreCalculation - startIgnoreCalculation);
}
if (shouldConsider) {
quint64 startAvatarDataPacking = usecTimestampNow();
++numOtherAvatars;
const AvatarMixerClientData* otherNodeData = reinterpret_cast<const AvatarMixerClientData*>(otherNode->getLinkedData());
// make sure we send out identity packets to and from new arrivals.
bool forceSend = !nodeData->checkAndSetHasReceivedFirstPacketsFrom(otherNode->getUUID());
// FIXME - this clause seems suspicious "... || otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp ..."
if (otherNodeData->getIdentityChangeTimestamp().time_since_epoch().count() > 0
&& (forceSend
|| otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp
|| distribution(generator) < IDENTITY_SEND_PROBABILITY)) {
sendIdentityPacket(otherNodeData, node);
}
const AvatarData* otherAvatar = otherNodeData->getConstAvatarData();
// Decide whether to send this avatar's data based on its distance from us
// The full rate distance is the distance at which EVERY update will be sent for this avatar
// at twice the full rate distance, there will be a 50% chance of sending this avatar's update
glm::vec3 otherPosition = otherAvatar->getClientGlobalPosition();
float distanceToAvatar = glm::length(myPosition - otherPosition);
// potentially update the max full rate distance for this frame
maxAvatarDistanceThisFrame = std::max(maxAvatarDistanceThisFrame, distanceToAvatar);
// This code handles the random dropping of avatar data based on the ratio of
// "getFullRateDistance" to actual distance.
//
// NOTE: If the receiving node is in "PAL mode" then it's asked to get even things that
// are out of view; this also appears to disable this random distribution.
if (distanceToAvatar != 0.0f
&& !getsOutOfView
&& distribution(generator) > (nodeData->getFullRateDistance() / distanceToAvatar)) {
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
}
if (shouldConsider) {
AvatarDataSequenceNumber lastSeqToReceiver = nodeData->getLastBroadcastSequenceNumber(otherNode->getUUID());
AvatarDataSequenceNumber lastSeqFromSender = otherNodeData->getLastReceivedSequenceNumber();
// FIXME - This code does appear to be working. But it seems brittle.
// It supports determining if the frame of data for this "other"
// avatar has already been sent to the receiver. This has been
// verified to work on a desktop display that renders at 60hz and
// therefore sends to mixer at 30hz. Each second you'd expect to
// have 15 (45hz-30hz) duplicate frames. In this case, the stat
// avg_other_av_skips_per_second does report 15.
//
// make sure we haven't already sent this data from this sender to this receiver
// or that somehow we haven't sent
if (lastSeqToReceiver == lastSeqFromSender && lastSeqToReceiver != 0) {
++numAvatarsHeldBack;
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
} else if (lastSeqFromSender - lastSeqToReceiver > 1) {
// this is a skip - we still send the packet but capture the presence of the skip so we see it happening
++numAvatarsWithSkippedFrames;
}
// we're going to send this avatar
if (shouldConsider) {
// determine if avatar is in view, to determine how much data to include...
glm::vec3 otherNodeBoxScale = (otherPosition - otherNodeData->getGlobalBoundingBoxCorner()) * 2.0f;
AABox otherNodeBox(otherNodeData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
bool isInView = nodeData->otherAvatarInView(otherNodeBox);
// this throttles the extra data to only be sent every Nth message
if (!isInView && !getsOutOfView && (lastSeqToReceiver % EXTRA_AVATAR_DATA_FRAME_RATIO > 0)) {
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
}
if (shouldConsider) {
// start a new segment in the PacketList for this avatar
avatarPacketList->startSegment();
AvatarData::AvatarDataDetail detail;
if (!isInView && !getsOutOfView) {
detail = AvatarData::MinimumData;
nodeData->incrementAvatarOutOfView();
} else {
detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO
? AvatarData::SendAllData : AvatarData::CullSmallData;
nodeData->incrementAvatarInView();
}
{
bool includeThisAvatar = true;
auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID());
QVector<JointData>& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID());
bool distanceAdjust = true;
glm::vec3 viewerPosition = myPosition;
AvatarDataPacket::HasFlags hasFlagsOut; // the result of the toByteArray
bool dropFaceTracking = false;
quint64 start = usecTimestampNow();
QByteArray bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
quint64 end = usecTimestampNow();
_stats.toByteArrayElapsedTime += (end - start);
static const int MAX_ALLOWED_AVATAR_DATA = (1400 - NUM_BYTES_RFC4122_UUID);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() resulted in very large buffer:" << bytes.size() << "... attempt to drop facial data";
dropFaceTracking = true; // first try dropping the facial data
bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() without facial data resulted in very large buffer:" << bytes.size() << "... reduce to MinimumData";
bytes = otherAvatar->toByteArray(AvatarData::MinimumData, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
}
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() MinimumData resulted in very large buffer:" << bytes.size() << "... FAIL!!";
includeThisAvatar = false;
}
}
if (includeThisAvatar) {
numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122());
numAvatarDataBytes += avatarPacketList->write(bytes);
_stats.numOthersIncluded++;
// increment the number of avatars sent to this receiver
nodeData->incrementNumAvatarsSentLastFrame();
// set the last sent sequence number for this sender on the receiver
nodeData->setLastBroadcastSequenceNumber(otherNode->getUUID(),
otherNodeData->getLastReceivedSequenceNumber());
}
}
avatarPacketList->endSegment();
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
}
}
}
// theoretically it's possible for a Node to be in the NodeList (and therefore end up here),
// but not yet have sent data that's linked to the node. Check for that case and don't
// consider those nodes.
if (otherNodeData) {
listItem++;
AvatarSharedPointer otherAvatar = otherNodeData->getAvatarSharedPointer();
avatarList << otherAvatar;
avatarDataToNodes[otherAvatar] = otherNode;
}
});
AvatarSharedPointer thisAvatar = nodeData->getAvatarSharedPointer();
ViewFrustum cameraView = nodeData->getViewFrustom();
std::priority_queue<AvatarPriority> sortedAvatars = AvatarData::sortAvatars(
avatarList, cameraView,
[&](AvatarSharedPointer avatar)->uint64_t{
auto avatarNode = avatarDataToNodes[avatar];
assert(avatarNode); // we can't have gotten here without the avatarData being a valid key in the map
return nodeData->getLastBroadcastTime(avatarNode->getUUID());
},
[&](AvatarSharedPointer avatar)->float{
glm::vec3 nodeBoxHalfScale = (avatar->getPosition() - avatar->getGlobalBoundingBoxCorner());
return glm::max(nodeBoxHalfScale.x, glm::max(nodeBoxHalfScale.y, nodeBoxHalfScale.z));
},
[&](AvatarSharedPointer avatar)->bool{
if (avatar == thisAvatar) {
return true; // ignore ourselves...
}
bool shouldIgnore = false;
// We will also ignore other nodes for a couple of different reasons:
// 1) ignore bubbles and ignore specific node
// 2) the node hasn't really updated its frame data recently, this can
// happen if for example the avatar is connected on a desktop and sending
// updates at ~30hz. So every 3 frames we skip a frame.
auto avatarNode = avatarDataToNodes[avatar];
assert(avatarNode); // we can't have gotten here without the avatarData being a valid key in the map
const AvatarMixerClientData* avatarNodeData = reinterpret_cast<const AvatarMixerClientData*>(avatarNode->getLinkedData());
assert(avatarNodeData); // we can't have gotten here without avatarNode having valid data
quint64 startIgnoreCalculation = usecTimestampNow();
// make sure we have data for this avatar, that it isn't the same node,
// and isn't an avatar that the viewing node has ignored
// or that has ignored the viewing node
if (!avatarNode->getLinkedData()
|| avatarNode->getUUID() == node->getUUID()
|| (node->isIgnoringNodeWithID(avatarNode->getUUID()) && !getsIgnoredByMe)
|| (avatarNode->isIgnoringNodeWithID(node->getUUID()) && !getsAnyIgnored)) {
shouldIgnore = true;
} else {
// Check to see if the space bubble is enabled
if (node->isIgnoreRadiusEnabled() || avatarNode->isIgnoreRadiusEnabled()) {
// Define the scale of the box for the current other node
glm::vec3 otherNodeBoxScale = (avatarNodeData->getPosition() - avatarNodeData->getGlobalBoundingBoxCorner()) * 2.0f;
// Set up the bounding box for the current other node
AABox otherNodeBox(avatarNodeData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(otherNodeBoxScale, minBubbleSize))) {
otherNodeBox.setScaleStayCentered(minBubbleSize);
}
// Quadruple the scale of both bounding boxes
otherNodeBox.embiggen(4.0f);
// Perform the collision check between the two bounding boxes
if (nodeBox.touches(otherNodeBox)) {
nodeData->ignoreOther(node, avatarNode);
shouldIgnore = !getsAnyIgnored;
}
}
// Not close enough to ignore
if (!shouldIgnore) {
nodeData->removeFromRadiusIgnoringSet(node, avatarNode->getUUID());
}
}
quint64 endIgnoreCalculation = usecTimestampNow();
_stats.ignoreCalculationElapsedTime += (endIgnoreCalculation - startIgnoreCalculation);
if (!shouldIgnore) {
AvatarDataSequenceNumber lastSeqToReceiver = nodeData->getLastBroadcastSequenceNumber(avatarNode->getUUID());
AvatarDataSequenceNumber lastSeqFromSender = avatarNodeData->getLastReceivedSequenceNumber();
// FIXME - This code does appear to be working. But it seems brittle.
// It supports determining if the frame of data for this "other"
// avatar has already been sent to the receiver. This has been
// verified to work on a desktop display that renders at 60hz and
// therefore sends to mixer at 30hz. Each second you'd expect to
// have 15 (45hz-30hz) duplicate frames. In this case, the stat
// avg_other_av_skips_per_second does report 15.
//
// make sure we haven't already sent this data from this sender to this receiver
// or that somehow we haven't sent
if (lastSeqToReceiver == lastSeqFromSender && lastSeqToReceiver != 0) {
++numAvatarsHeldBack;
shouldIgnore = true;
} else if (lastSeqFromSender - lastSeqToReceiver > 1) {
// this is a skip - we still send the packet but capture the presence of the skip so we see it happening
++numAvatarsWithSkippedFrames;
}
}
return shouldIgnore;
});
// loop through our sorted avatars and allocate our bandwidth to them accordingly
int avatarRank = 0;
// this is overly conservative, because it includes some avatars we might not consider
int remainingAvatars = (int)sortedAvatars.size();
while (!sortedAvatars.empty()) {
AvatarPriority sortData = sortedAvatars.top();
sortedAvatars.pop();
const auto& avatarData = sortData.avatar;
avatarRank++;
remainingAvatars--;
auto otherNode = avatarDataToNodes[avatarData];
assert(otherNode); // we can't have gotten here without the avatarData being a valid key in the map
// NOTE: Here's where we determine if we are over budget and drop to bare minimum data
int minimRemainingAvatarBytes = minimumBytesPerAvatar * remainingAvatars;
bool overBudget = (identityBytesSent + numAvatarDataBytes + minimRemainingAvatarBytes) > maxAvatarBytesPerFrame;
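The reserve keeps enough headroom to send at least the 18-byte minimum (has-flags plus UUID) for every avatar still in the queue. With the illustrative numbers above:
// assuming minimumBytesPerAvatar == 18 and maxAvatarBytesPerFrame ~= 13888:
// with 100 avatars remaining, the reserve is 1800 bytes, so once
// identityBytesSent + numAvatarDataBytes passes ~12088 the rest of this
// frame's avatars are downgraded to AvatarData::NoData below.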
quint64 startAvatarDataPacking = usecTimestampNow();
++numOtherAvatars;
const AvatarMixerClientData* otherNodeData = reinterpret_cast<const AvatarMixerClientData*>(otherNode->getLinkedData());
// make sure we send out identity packets to and from new arrivals.
bool forceSend = !nodeData->checkAndSetHasReceivedFirstPacketsFrom(otherNode->getUUID());
// FIXME - this clause seems suspicious "... || otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp ..."
if (!overBudget
&& otherNodeData->getIdentityChangeTimestamp().time_since_epoch().count() > 0
&& (forceSend
|| otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp
|| distribution(generator) < IDENTITY_SEND_PROBABILITY)) {
identityBytesSent += sendIdentityPacket(otherNodeData, node);
}
const AvatarData* otherAvatar = otherNodeData->getConstAvatarData();
glm::vec3 otherPosition = otherAvatar->getClientGlobalPosition();
// determine if avatar is in view, to determine how much data to include...
glm::vec3 otherNodeBoxScale = (otherPosition - otherNodeData->getGlobalBoundingBoxCorner()) * 2.0f;
AABox otherNodeBox(otherNodeData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
bool isInView = nodeData->otherAvatarInView(otherNodeBox);
// start a new segment in the PacketList for this avatar
avatarPacketList->startSegment();
AvatarData::AvatarDataDetail detail;
if (overBudget) {
overBudgetAvatars++;
_stats.overBudgetAvatars++;
detail = AvatarData::NoData;
} else if (!isInView && !getsOutOfView) {
detail = AvatarData::NoData;
nodeData->incrementAvatarOutOfView();
} else {
detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO
? AvatarData::SendAllData : AvatarData::CullSmallData;
nodeData->incrementAvatarInView();
}
bool includeThisAvatar = true;
auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID());
QVector<JointData>& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID());
bool distanceAdjust = true;
glm::vec3 viewerPosition = myPosition;
AvatarDataPacket::HasFlags hasFlagsOut; // the result of the toByteArray
bool dropFaceTracking = false;
quint64 start = usecTimestampNow();
QByteArray bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
quint64 end = usecTimestampNow();
_stats.toByteArrayElapsedTime += (end - start);
static const int MAX_ALLOWED_AVATAR_DATA = (1400 - NUM_BYTES_RFC4122_UUID);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() resulted in very large buffer:" << bytes.size() << "... attempt to drop facial data";
dropFaceTracking = true; // first try dropping the facial data
bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() without facial data resulted in very large buffer:" << bytes.size() << "... reduce to MinimumData";
bytes = otherAvatar->toByteArray(AvatarData::MinimumData, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
}
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() MinimumData resulted in very large buffer:" << bytes.size() << "... FAIL!!";
includeThisAvatar = false;
}
}
if (includeThisAvatar) {
numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122());
numAvatarDataBytes += avatarPacketList->write(bytes);
_stats.numOthersIncluded++;
// increment the number of avatars sent to this receiver
nodeData->incrementNumAvatarsSentLastFrame();
// set the last sent sequence number for this sender on the receiver
nodeData->setLastBroadcastSequenceNumber(otherNode->getUUID(),
otherNodeData->getLastReceivedSequenceNumber());
// remember the last time we sent details about this other node to the receiver
nodeData->setLastBroadcastTime(otherNode->getUUID(), start);
}
avatarPacketList->endSegment();
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
};
quint64 startPacketSending = usecTimestampNow();
// close the current packet so that we're always sending something
@ -426,13 +422,6 @@ void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
nodeData->recordNumOtherAvatarStarves(numAvatarsHeldBack);
nodeData->recordNumOtherAvatarSkips(numAvatarsWithSkippedFrames);
if (numOtherAvatars == 0) {
// update the full rate distance to FLOAT_MAX since we didn't have any other avatars to send
nodeData->setMaxAvatarDistance(FLT_MAX);
} else {
nodeData->setMaxAvatarDistance(maxAvatarDistanceThisFrame);
}
quint64 endPacketSending = usecTimestampNow();
_stats.packetSendingElapsedTime += (endPacketSending - startPacketSending);
}

View file

@ -25,6 +25,8 @@ public:
int numBytesSent { 0 };
int numIdentityPackets { 0 };
int numOthersIncluded { 0 };
int overBudgetAvatars { 0 };
quint64 ignoreCalculationElapsedTime { 0 };
quint64 avatarDataPackingElapsedTime { 0 };
quint64 packetSendingElapsedTime { 0 };
@ -43,6 +45,8 @@ public:
numBytesSent = 0;
numIdentityPackets = 0;
numOthersIncluded = 0;
overBudgetAvatars = 0;
ignoreCalculationElapsedTime = 0;
avatarDataPackingElapsedTime = 0;
packetSendingElapsedTime = 0;
@ -60,6 +64,8 @@ public:
numBytesSent += rhs.numBytesSent;
numIdentityPackets += rhs.numIdentityPackets;
numOthersIncluded += rhs.numOthersIncluded;
overBudgetAvatars += rhs.overBudgetAvatars;
ignoreCalculationElapsedTime += rhs.ignoreCalculationElapsedTime;
avatarDataPackingElapsedTime += rhs.avatarDataPackingElapsedTime;
packetSendingElapsedTime += rhs.packetSendingElapsedTime;
@ -85,7 +91,7 @@ public:
void harvestStats(AvatarMixerSlaveStats& stats);
private:
void sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode);
int sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode);
// frame state
ConstIter _begin;

View file

@ -107,11 +107,11 @@ Item {
}
StatText {
visible: root.expanded
text: "Fully Simulated Avatars: " + root.fullySimulatedAvatarCount
text: "Avatars Updated: " + root.updatedAvatarCount
}
StatText {
visible: root.expanded
text: "Partially Simulated Avatars: " + root.partiallySimulatedAvatarCount
text: "Avatars NOT Updated: " + root.notUpdatedAvatarCount
}
}
}

View file

@ -334,11 +334,6 @@ void Avatar::updateAvatarEntities() {
setAvatarEntityDataChanged(false);
}
bool Avatar::shouldDie() const {
const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND;
return _owningAvatarMixer.isNull() || getUsecsSinceLastUpdate() > AVATAR_SILENCE_THRESHOLD_USECS;
}
void Avatar::simulate(float deltaTime, bool inView) {
PROFILE_RANGE(simulation, "simulate");

View file

@ -178,12 +178,13 @@ public:
uint64_t getLastRenderUpdateTime() const { return _lastRenderUpdateTime; }
void setLastRenderUpdateTime(uint64_t time) { _lastRenderUpdateTime = time; }
bool shouldDie() const;
void animateScaleChanges(float deltaTime);
void setTargetScale(float targetScale) override;
Q_INVOKABLE float getSimulationRate(const QString& rateName = QString("")) const;
bool hasNewJointData() const { return _hasNewJointData; }
public slots:
// FIXME - these should be migrated to use Pose data instead

View file

@ -148,15 +148,6 @@ float AvatarManager::getAvatarSimulationRate(const QUuid& sessionID, const QStri
}
class AvatarPriority {
public:
AvatarPriority(AvatarSharedPointer a, float p) : avatar(a), priority(p) {}
AvatarSharedPointer avatar;
float priority;
bool operator<(const AvatarPriority& other) const { return priority < other.priority; }
};
void AvatarManager::updateOtherAvatars(float deltaTime) {
// lock the hash for read to check the size
QReadLocker lock(&_hashLock);
@ -172,59 +163,35 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
QList<AvatarSharedPointer> avatarList = avatarMap.values();
ViewFrustum cameraView;
qApp->copyDisplayViewFrustum(cameraView);
- glm::vec3 frustumCenter = cameraView.getPosition();
- const float OUT_OF_VIEW_PENALTY = -10.0;
- std::priority_queue<AvatarPriority> sortedAvatars;
- {
- PROFILE_RANGE(simulation, "sort");
- for (int32_t i = 0; i < avatarList.size(); ++i) {
- const auto& avatar = std::static_pointer_cast<Avatar>(avatarList.at(i));
- if (avatar == _myAvatar || !avatar->isInitialized()) {
+ std::priority_queue<AvatarPriority> sortedAvatars = AvatarData::sortAvatars(
+ avatarList, cameraView,
+ [](AvatarSharedPointer avatar)->uint64_t{
+ return std::static_pointer_cast<Avatar>(avatar)->getLastRenderUpdateTime();
+ },
+ [](AvatarSharedPointer avatar)->float{
+ return std::static_pointer_cast<Avatar>(avatar)->getBoundingRadius();
+ },
+ [this](AvatarSharedPointer avatar)->bool{
+ const auto& castedAvatar = std::static_pointer_cast<Avatar>(avatar);
+ if (castedAvatar == _myAvatar || !castedAvatar->isInitialized()) {
// DO NOT update _myAvatar! Its update has already been done earlier in the main loop.
// DO NOT update or fade out uninitialized Avatars
- continue;
+ return true; // ignore it
}
if (avatar->shouldDie()) {
removeAvatar(avatar->getID());
- continue;
+ return true; // ignore it
}
if (avatar->isDead()) {
- continue;
+ return true; // ignore it
}
- // priority = weighted linear combination of:
- // (a) apparentSize
- // (b) proximity to center of view
- // (c) time since last update
- // (d) TIME_PENALTY to help recently updated entries sort toward back
- glm::vec3 avatarPosition = avatar->getPosition();
- glm::vec3 offset = avatarPosition - frustumCenter;
- float distance = glm::length(offset) + 0.001f; // add 1mm to avoid divide by zero
- float radius = avatar->getBoundingRadius();
- const glm::vec3& forward = cameraView.getDirection();
- float apparentSize = 2.0f * radius / distance;
- float cosineAngle = glm::length(glm::dot(offset, forward) * forward) / distance;
- float age = (float)(startTime - avatar->getLastRenderUpdateTime()) / (float)(USECS_PER_SECOND);
- // NOTE: we are adding values of different units to get a single measure of "priority".
- // Thus we multiply each component by a conversion "weight" that scales its units relative to the others.
- // These weights are pure magic tuning and should be hard coded in the relation below,
- // but are currently exposed for anyone who would like to explore fine tuning:
- float priority = _avatarSortCoefficientSize * apparentSize
- + _avatarSortCoefficientCenter * cosineAngle
- + _avatarSortCoefficientAge * age;
- // decrement priority of avatars outside keyhole
- if (distance > cameraView.getCenterRadius()) {
- if (!cameraView.sphereIntersectsFrustum(avatarPosition, radius)) {
- priority += OUT_OF_VIEW_PENALTY;
- }
- }
- sortedAvatars.push(AvatarPriority(avatar, priority));
- }
- }
+ return false;
+ });
render::PendingChanges pendingChanges;
const uint64_t RENDER_UPDATE_BUDGET = 1500; // usec
@ -232,8 +199,8 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
uint64_t renderExpiry = startTime + RENDER_UPDATE_BUDGET;
uint64_t maxExpiry = startTime + MAX_UPDATE_BUDGET;
int fullySimulatedAvatars = 0;
int partiallySimulatedAvatars = 0;
int numAvatarsUpdated = 0;
int numAVatarsNotUpdated = 0;
while (!sortedAvatars.empty()) {
const AvatarPriority& sortData = sortedAvatars.top();
const auto& avatar = std::static_pointer_cast<Avatar>(sortData.avatar);
@ -254,33 +221,57 @@ void AvatarManager::updateOtherAvatars(float deltaTime) {
}
avatar->animateScaleChanges(deltaTime);
const float OUT_OF_VIEW_THRESHOLD = 0.5f * AvatarData::OUT_OF_VIEW_PENALTY;
uint64_t now = usecTimestampNow();
if (now < renderExpiry) {
// we're within budget
const float OUT_OF_VIEW_THRESHOLD = 0.5f * OUT_OF_VIEW_PENALTY;
bool inView = sortData.priority > OUT_OF_VIEW_THRESHOLD;
if (inView && avatar->hasNewJointData()) {
numAvatarsUpdated++;
}
avatar->simulate(deltaTime, inView);
avatar->updateRenderItem(pendingChanges);
avatar->setLastRenderUpdateTime(startTime);
fullySimulatedAvatars++;
} else if (now < maxExpiry) {
// we've spent most of our time budget, but we still simulate() the avatar as if it were out of view
// --> some avatars may freeze until their priority trickles up
const bool inView = false;
avatar->simulate(deltaTime, inView);
partiallySimulatedAvatars++;
bool inView = sortData.priority > OUT_OF_VIEW_THRESHOLD;
if (inView && avatar->hasNewJointData()) {
numAVatarsNotUpdated++;
}
avatar->simulate(deltaTime, false);
} else {
// we've spent ALL of our time budget --> bail on the rest of the avatar updates
// --> more avatars may freeze until their priority trickles up
// --> some scale or fade animations may glitch
// --> some avatar velocity measurements may be a little off
// HACK: no time to simulate, but we will take the time to count how many were tragically missed
bool inView = sortData.priority > OUT_OF_VIEW_THRESHOLD;
if (!inView) {
break;
}
if (inView && avatar->hasNewJointData()) {
numAVatarsNotUpdated++;
}
sortedAvatars.pop();
while (inView && !sortedAvatars.empty()) {
const AvatarPriority& newSortData = sortedAvatars.top();
const auto& newAvatar = std::static_pointer_cast<Avatar>(newSortData.avatar);
inView = newSortData.priority > OUT_OF_VIEW_THRESHOLD;
if (inView && newAvatar->hasNewJointData()) {
numAVatarsNotUpdated++;
}
sortedAvatars.pop();
}
break;
}
sortedAvatars.pop();
}
_avatarSimulationTime = (float)(usecTimestampNow() - startTime) / (float)USECS_PER_MSEC;
_fullySimulatedAvatars = fullySimulatedAvatars;
_partiallySimulatedAvatars = partiallySimulatedAvatars;
_numAvatarsUpdated = numAvatarsUpdated;
_numAvatarsNotUpdated = numAVatarsNotUpdated;
qApp->getMain3DScene()->enqueuePendingChanges(pendingChanges);
simulateAvatarFades(deltaTime);
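The budget tiers in the loop above, summarized (RENDER_UPDATE_BUDGET is 1500 usec; MAX_UPDATE_BUDGET's value sits outside this hunk):
// now < renderExpiry : full simulate(deltaTime, inView) plus render update
// now < maxExpiry : simulate(deltaTime, false) only; the avatar may freeze
// otherwise : bail out, counting the remaining in-view avatars that
// have fresh joint data as "not updated"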
@ -598,25 +589,40 @@ RayToAvatarIntersectionResult AvatarManager::findRayIntersection(const PickRay&
// HACK
float AvatarManager::getAvatarSortCoefficient(const QString& name) {
if (name == "size") {
return _avatarSortCoefficientSize;
return AvatarData::_avatarSortCoefficientSize;
} else if (name == "center") {
return _avatarSortCoefficientCenter;
return AvatarData::_avatarSortCoefficientCenter;
} else if (name == "age") {
return _avatarSortCoefficientAge;
return AvatarData::_avatarSortCoefficientAge;
}
return 0.0f;
}
// HACK
void AvatarManager::setAvatarSortCoefficient(const QString& name, const QScriptValue& value) {
bool somethingChanged = false;
if (value.isNumber()) {
float numericalValue = (float)value.toNumber();
if (name == "size") {
_avatarSortCoefficientSize = numericalValue;
AvatarData::_avatarSortCoefficientSize = numericalValue;
somethingChanged = true;
} else if (name == "center") {
_avatarSortCoefficientCenter = numericalValue;
AvatarData::_avatarSortCoefficientCenter = numericalValue;
somethingChanged = true;
} else if (name == "age") {
_avatarSortCoefficientAge = numericalValue;
AvatarData::_avatarSortCoefficientAge = numericalValue;
somethingChanged = true;
}
}
if (somethingChanged) {
size_t packetSize = sizeof(AvatarData::_avatarSortCoefficientSize) +
sizeof(AvatarData::_avatarSortCoefficientCenter) +
sizeof(AvatarData::_avatarSortCoefficientAge);
auto packet = NLPacket::create(PacketType::AdjustAvatarSorting, packetSize);
packet->writePrimitive(AvatarData::_avatarSortCoefficientSize);
packet->writePrimitive(AvatarData::_avatarSortCoefficientCenter);
packet->writePrimitive(AvatarData::_avatarSortCoefficientAge);
DependencyManager::get<NodeList>()->broadcastToNodes(std::move(packet), NodeSet() << NodeType::AvatarMixer);
}
}

View file

@ -43,8 +43,8 @@ public:
std::shared_ptr<MyAvatar> getMyAvatar() { return _myAvatar; }
AvatarSharedPointer getAvatarBySessionID(const QUuid& sessionID) const override;
int getFullySimulatedAvatars() const { return _fullySimulatedAvatars; }
int getPartiallySimulatedAvatars() const { return _partiallySimulatedAvatars; }
int getNumAvatarsUpdated() const { return _numAvatarsUpdated; }
int getNumAvatarsNotUpdated() const { return _numAvatarsNotUpdated; }
float getAvatarSimulationTime() const { return _avatarSimulationTime; }
void updateMyAvatar(float deltaTime);
@ -120,15 +120,9 @@ private:
VectorOfMotionStates _motionStatesToRemoveFromPhysics;
RateCounter<> _myAvatarSendRate;
int _fullySimulatedAvatars { 0 };
int _partiallySimulatedAvatars { 0 };
int _numAvatarsUpdated { 0 };
int _numAvatarsNotUpdated { 0 };
float _avatarSimulationTime { 0.0f };
// TODO: remove this HACK once we settle on optimal sort coefficients
// These coefficients are exposed for fine tuning the sort priority for transferring new _jointData to the render pipeline.
float _avatarSortCoefficientSize { 0.5f };
float _avatarSortCoefficientCenter { 0.25 };
float _avatarSortCoefficientAge { 1.0f };
};
Q_DECLARE_METATYPE(AvatarManager::LocalLight)

View file

@ -148,13 +148,22 @@ MyAvatar::MyAvatar(RigPointer rig) :
auto player = DependencyManager::get<Deck>();
auto recorder = DependencyManager::get<Recorder>();
connect(player.data(), &Deck::playbackStateChanged, [=] {
if (player->isPlaying()) {
bool isPlaying = player->isPlaying();
if (isPlaying) {
auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
if (recordingInterface->getPlayFromCurrentLocation()) {
setRecordingBasis();
}
} else {
clearRecordingBasis();
useFullAvatarURL(_fullAvatarURLFromPreferences, _fullAvatarModelName);
}
auto audioIO = DependencyManager::get<AudioClient>();
audioIO->setIsPlayingBackRecording(isPlaying);
if (_rig) {
_rig->setEnableAnimations(!isPlaying);
}
});
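Pieced together with the Rig and AudioClient hunks later in this commit, the playback wiring is, in sketch form:
// Deck::playbackStateChanged
// -> MyAvatar: set/clear the recording basis, restore the preferred avatar URL on stop
// -> AudioClient::setIsPlayingBackRecording(isPlaying)
//    (handleAudioInput() early-returns while true, muting the mic path)
// -> Rig::setEnableAnimations(!isPlaying)
//    (updateAnimations() skips the anim graph so recorded joint data wins)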
@ -180,8 +189,8 @@ MyAvatar::MyAvatar(RigPointer rig) :
if (recordingInterface->getPlayerUseSkeletonModel() && dummyAvatar.getSkeletonModelURL().isValid() &&
(dummyAvatar.getSkeletonModelURL() != getSkeletonModelURL())) {
// FIXME
//myAvatar->useFullAvatarURL()
setSkeletonModelURL(dummyAvatar.getSkeletonModelURL());
}
if (recordingInterface->getPlayerUseDisplayName() && dummyAvatar.getDisplayName() != getDisplayName()) {
@ -204,6 +213,11 @@ MyAvatar::MyAvatar(RigPointer rig) :
// head orientation
_headData->setLookAtPosition(headData->getLookAtPosition());
}
auto jointData = dummyAvatar.getRawJointData();
if (jointData.length() > 0 && _rig) {
_rig->copyJointsFromJointData(jointData);
}
});
connect(rig.get(), SIGNAL(onLoadComplete()), this, SIGNAL(onLoadComplete()));
@ -471,7 +485,9 @@ void MyAvatar::simulate(float deltaTime) {
{
PerformanceTimer perfTimer("joints");
// copy out the skeleton joints from the model
_rig->copyJointsIntoJointData(_jointData);
if (_rigEnabled) {
_rig->copyJointsIntoJointData(_jointData);
}
}
{

View file

@ -485,6 +485,7 @@ private:
std::unordered_set<int> _headBoneSet;
RigPointer _rig;
bool _prevShouldDrawHead;
bool _rigEnabled { true };
bool _enableDebugDrawDefaultPose { false };
bool _enableDebugDrawAnimPose { false };

View file

@ -123,8 +123,8 @@ void Stats::updateStats(bool force) {
auto avatarManager = DependencyManager::get<AvatarManager>();
// we need to take one avatar out so we don't include ourselves
STAT_UPDATE(avatarCount, avatarManager->size() - 1);
STAT_UPDATE(fullySimulatedAvatarCount, avatarManager->getFullySimulatedAvatars());
STAT_UPDATE(partiallySimulatedAvatarCount, avatarManager->getPartiallySimulatedAvatars());
STAT_UPDATE(updatedAvatarCount, avatarManager->getNumAvatarsUpdated());
STAT_UPDATE(notUpdatedAvatarCount, avatarManager->getNumAvatarsNotUpdated());
STAT_UPDATE(serverCount, (int)nodeList->size());
STAT_UPDATE(framerate, qApp->getFps());
if (qApp->getActiveDisplayPlugin()) {

View file

@ -49,8 +49,8 @@ class Stats : public QQuickItem {
STATS_PROPERTY(int, simrate, 0)
STATS_PROPERTY(int, avatarSimrate, 0)
STATS_PROPERTY(int, avatarCount, 0)
STATS_PROPERTY(int, fullySimulatedAvatarCount, 0)
STATS_PROPERTY(int, partiallySimulatedAvatarCount, 0)
STATS_PROPERTY(int, updatedAvatarCount, 0)
STATS_PROPERTY(int, notUpdatedAvatarCount, 0)
STATS_PROPERTY(int, packetInCount, 0)
STATS_PROPERTY(int, packetOutCount, 0)
STATS_PROPERTY(float, mbpsIn, 0)
@ -160,8 +160,8 @@ signals:
void simrateChanged();
void avatarSimrateChanged();
void avatarCountChanged();
void fullySimulatedAvatarCountChanged();
void partiallySimulatedAvatarCountChanged();
void updatedAvatarCountChanged();
void notUpdatedAvatarCountChanged();
void packetInCountChanged();
void packetOutCountChanged();
void mbpsInChanged();

View file

@ -483,6 +483,10 @@ void Rig::setEnableInverseKinematics(bool enable) {
_enableInverseKinematics = enable;
}
void Rig::setEnableAnimations(bool enable) {
_enabledAnimations = enable;
}
AnimPose Rig::getAbsoluteDefaultPose(int index) const {
if (_animSkeleton && index >= 0 && index < _animSkeleton->getNumJoints()) {
return _absoluteDefaultPoses[index];
@ -907,7 +911,7 @@ void Rig::updateAnimations(float deltaTime, glm::mat4 rootTransform) {
setModelOffset(rootTransform);
if (_animNode) {
if (_animNode && _enabledAnimations) {
PerformanceTimer perfTimer("handleTriggers");
updateAnimationStateHandlers();

View file

@ -210,6 +210,7 @@ public:
void computeAvatarBoundingCapsule(const FBXGeometry& geometry, float& radiusOut, float& heightOut, glm::vec3& offsetOut) const;
void setEnableInverseKinematics(bool enable);
void setEnableAnimations(bool enable);
const glm::mat4& getGeometryToRigTransform() const { return _geometryToRigTransform; }
@ -314,6 +315,7 @@ protected:
int32_t _numOverrides { 0 };
bool _lastEnableInverseKinematics { true };
bool _enableInverseKinematics { true };
bool _enabledAnimations { true };
mutable uint32_t _jointNameWarningCount { 0 };

View file

@ -39,13 +39,10 @@
#include <plugins/CodecPlugin.h>
#include <plugins/PluginManager.h>
#include <udt/PacketHeaders.h>
#include <PositionalAudioStream.h>
#include <SettingHandle.h>
#include <SharedUtil.h>
#include <UUID.h>
#include <Transform.h>
#include "PositionalAudioStream.h"
#include "AudioClientLogging.h"
#include "AudioLogging.h"
@ -294,12 +291,12 @@ QString friendlyNameForAudioDevice(IMMDevice* pEndpoint) {
IPropertyStore* pPropertyStore;
pEndpoint->OpenPropertyStore(STGM_READ, &pPropertyStore);
pEndpoint->Release();
pEndpoint = NULL;
pEndpoint = nullptr;
PROPVARIANT pv;
PropVariantInit(&pv);
HRESULT hr = pPropertyStore->GetValue(PKEY_Device_FriendlyName, &pv);
pPropertyStore->Release();
pPropertyStore = NULL;
pPropertyStore = nullptr;
deviceName = QString::fromWCharArray((wchar_t*)pv.pwszVal);
if (!IsWindows8OrGreater()) {
// Windows 7 provides only the 31 first characters of the device name.
@ -313,9 +310,9 @@ QString friendlyNameForAudioDevice(IMMDevice* pEndpoint) {
QString AudioClient::friendlyNameForAudioDevice(wchar_t* guid) {
QString deviceName;
HRESULT hr = S_OK;
CoInitialize(NULL);
IMMDeviceEnumerator* pMMDeviceEnumerator = NULL;
CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&pMMDeviceEnumerator);
CoInitialize(nullptr);
IMMDeviceEnumerator* pMMDeviceEnumerator = nullptr;
CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&pMMDeviceEnumerator);
IMMDevice* pEndpoint;
hr = pMMDeviceEnumerator->GetDevice(guid, &pEndpoint);
if (hr == E_NOTFOUND) {
@ -325,7 +322,7 @@ QString AudioClient::friendlyNameForAudioDevice(wchar_t* guid) {
deviceName = ::friendlyNameForAudioDevice(pEndpoint);
}
pMMDeviceEnumerator->Release();
pMMDeviceEnumerator = NULL;
pMMDeviceEnumerator = nullptr;
CoUninitialize();
return deviceName;
}
@ -968,8 +965,7 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
}
void AudioClient::handleAudioInput() {
if (!_inputDevice) {
if (!_inputDevice || _isPlayingBackRecording) {
return;
}

View file

@ -147,6 +147,8 @@ public:
void setPositionGetter(AudioPositionGetter positionGetter) { _positionGetter = positionGetter; }
void setOrientationGetter(AudioOrientationGetter orientationGetter) { _orientationGetter = orientationGetter; }
void setIsPlayingBackRecording(bool isPlayingBackRecording) { _isPlayingBackRecording = isPlayingBackRecording; }
Q_INVOKABLE void setAvatarBoundingBoxParameters(glm::vec3 corner, glm::vec3 scale);
void checkDevices();
@ -369,10 +371,12 @@ private:
QVector<QString> _inputDevices;
QVector<QString> _outputDevices;
bool _hasReceivedFirstPacket = false;
bool _hasReceivedFirstPacket { false };
QVector<AudioInjector*> _activeLocalAudioInjectors;
bool _isPlayingBackRecording { false };
CodecPluginPointer _codec;
QString _selectedCodecName;
Encoder* _encoder { nullptr }; // for outbound mic stream

View file

@ -91,4 +91,4 @@ void injectorOptionsFromScriptValue(const QScriptValue& object, AudioInjectorOpt
qCWarning(audio) << "Unknown audio injector option:" << it.name();
}
}
}
}

View file

@ -36,6 +36,8 @@
#include <shared/JSONHelpers.h>
#include <ShapeInfo.h>
#include <AudioHelpers.h>
#include <Profile.h>
#include <VariantMapToScriptValue.h>
#include "AvatarLogging.h"
@ -68,8 +70,7 @@ AvatarData::AvatarData() :
_displayNameAlpha(1.0f),
_errorLogExpiry(0),
_owningAvatarMixer(),
_targetVelocity(0.0f),
_localAABox(DEFAULT_LOCAL_AABOX_CORNER, DEFAULT_LOCAL_AABOX_SCALE)
_targetVelocity(0.0f)
{
setBodyPitch(0.0f);
setBodyYaw(-90.0f);
@ -192,6 +193,13 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
unsigned char* destinationBuffer = reinterpret_cast<unsigned char*>(avatarDataByteArray.data());
unsigned char* startPosition = destinationBuffer;
// special case, if we were asked for no data, then just include the flags all set to nothing
if (dataDetail == NoData) {
AvatarDataPacket::HasFlags packetStateFlags = 0;
memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags));
return avatarDataByteArray.left(sizeof(packetStateFlags));
}
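This ties back to the mixer's budget math earlier in the commit: a NoData avatar costs only the 2-byte has-flags written here plus the 16-byte RFC 4122 UUID the broadcast loop writes per segment, exactly the 18-byte minimumBytesPerAvatar reserved per remaining avatar.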
// FIXME -
//
// BUG -- if you enter a space bubble, and then back away, the avatar has wrong orientation until "send all" happens...
@ -2309,3 +2317,100 @@ void RayToAvatarIntersectionResultFromScriptValue(const QScriptValue& object, Ra
vec3FromScriptValue(intersection, value.intersection);
}
}
const float AvatarData::OUT_OF_VIEW_PENALTY = -10.0f;
float AvatarData::_avatarSortCoefficientSize { 0.5f };
float AvatarData::_avatarSortCoefficientCenter { 0.25 };
float AvatarData::_avatarSortCoefficientAge { 1.0f };
std::priority_queue<AvatarPriority> AvatarData::sortAvatars(
QList<AvatarSharedPointer> avatarList,
const ViewFrustum& cameraView,
std::function<uint64_t(AvatarSharedPointer)> getLastUpdated,
std::function<float(AvatarSharedPointer)> getBoundingRadius,
std::function<bool(AvatarSharedPointer)> shouldIgnore) {
uint64_t startTime = usecTimestampNow();
glm::vec3 frustumCenter = cameraView.getPosition();
std::priority_queue<AvatarPriority> sortedAvatars;
{
PROFILE_RANGE(simulation, "sort");
for (int32_t i = 0; i < avatarList.size(); ++i) {
const auto& avatar = avatarList.at(i);
if (shouldIgnore(avatar)) {
continue;
}
// priority = weighted linear combination of:
// (a) apparentSize
// (b) proximity to center of view
// (c) time since last update
glm::vec3 avatarPosition = avatar->getPosition();
glm::vec3 offset = avatarPosition - frustumCenter;
float distance = glm::length(offset) + 0.001f; // add 1mm to avoid divide by zero
// FIXME - AvatarData has something equivalent to this
float radius = getBoundingRadius(avatar);
const glm::vec3& forward = cameraView.getDirection();
float apparentSize = 2.0f * radius / distance;
float cosineAngle = glm::length(glm::dot(offset, forward) * forward) / distance;
float age = (float)(startTime - getLastUpdated(avatar)) / (float)(USECS_PER_SECOND);
// NOTE: we are adding values of different units to get a single measure of "priority".
// Thus we multiply each component by a conversion "weight" that scales its units relative to the others.
// These weights are pure magic tuning and should eventually be hard-coded in the relation below,
// but are currently exposed for anyone who would like to explore fine-tuning:
float priority = _avatarSortCoefficientSize * apparentSize
+ _avatarSortCoefficientCenter * cosineAngle
+ _avatarSortCoefficientAge * age;
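// Worked example with the default coefficients (illustrative numbers):
// an avatar of radius 1m, 5m away, dead-center in view, updated 0.5s ago:
//   apparentSize = 2*1/5 = 0.4, cosineAngle = 1.0, age = 0.5
//   priority = 0.5*0.4 + 0.25*1.0 + 1.0*0.5 = 0.95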
// decrement priority of avatars outside keyhole
if (distance > cameraView.getCenterRadius()) {
if (!cameraView.sphereIntersectsFrustum(avatarPosition, radius)) {
priority += OUT_OF_VIEW_PENALTY;
}
}
sortedAvatars.push(AvatarPriority(avatar, priority));
}
}
return sortedAvatars;
}
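
A caller-side sketch of the new sort (the lambdas are illustrative placeholders, not taken from this change; avatarList and myViewFrustum are assumed to exist):

// Sketch: rank avatars and process them highest-priority first.
std::priority_queue<AvatarPriority> sorted = AvatarData::sortAvatars(
    avatarList, myViewFrustum,
    [](AvatarSharedPointer a) { return usecTimestampNow(); }, // placeholder timestamp
    [](AvatarSharedPointer a) { return 0.5f; },               // placeholder radius
    [](AvatarSharedPointer a) { return false; });             // ignore nobody
while (!sorted.empty()) {
    const AvatarPriority& ap = sorted.top();
    // ... spend update budget on ap.avatar ...
    sorted.pop();
}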
QScriptValue AvatarEntityMapToScriptValue(QScriptEngine* engine, const AvatarEntityMap& value) {
QScriptValue obj = engine->newObject();
for (auto entityID : value.keys()) {
QByteArray entityProperties = value.value(entityID);
QJsonDocument jsonEntityProperties = QJsonDocument::fromBinaryData(entityProperties);
if (!jsonEntityProperties.isObject()) {
qCDebug(avatars) << "bad AvatarEntityData in AvatarEntityMap" << QString(entityProperties.toHex());
}
QVariant variantEntityProperties = jsonEntityProperties.toVariant();
QVariantMap entityPropertiesMap = variantEntityProperties.toMap();
QScriptValue scriptEntityProperties = variantMapToScriptValue(entityPropertiesMap, *engine);
QString key = entityID.toString();
obj.setProperty(key, scriptEntityProperties);
}
return obj;
}
void AvatarEntityMapFromScriptValue(const QScriptValue& object, AvatarEntityMap& value) {
QScriptValueIterator itr(object);
while (itr.hasNext()) {
itr.next();
QUuid entityID = QUuid(itr.name());
QScriptValue scriptEntityProperties = itr.value();
QVariant variantEntityProperties = scriptEntityProperties.toVariant();
QJsonDocument jsonEntityProperties = QJsonDocument::fromVariant(variantEntityProperties);
QByteArray binaryEntityProperties = jsonEntityProperties.toBinaryData();
value[entityID] = binaryEntityProperties;
}
}
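
The two converters are symmetric: each entity's properties travel as a QJsonDocument binary blob keyed by its UUID. A round-trip sketch (assuming a valid QScriptEngine* engine):

// Sketch: an AvatarEntityMap survives the script-value round trip intact.
AvatarEntityMap original;
original[QUuid::createUuid()] =
    QJsonDocument::fromJson("{\"name\":\"ball\"}").toBinaryData();
QScriptValue scripted = AvatarEntityMapToScriptValue(engine, original);
AvatarEntityMap restored;
AvatarEntityMapFromScriptValue(scripted, restored);
// restored now holds the same UUID -> binary-JSON mapping as original.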

View file

@ -14,6 +14,8 @@
#include <string>
#include <memory>
#include <queue>
/* VS2010 defines stdint.h, but not inttypes.h */
#if defined(_MSC_VER)
typedef signed char int8_t;
@ -44,6 +46,7 @@ typedef unsigned long long quint64;
#include <QVariantMap>
#include <QVector>
#include <QtScript/QScriptable>
#include <QtScript/QScriptValueIterator>
#include <QReadWriteLock>
#include <JointData.h>
@ -57,6 +60,7 @@ typedef unsigned long long quint64;
#include <ThreadSafeValueCache.h>
#include <SharedUtil.h>
#include <shared/RateCounter.h>
#include <ViewFrustum.h>
#include "AABox.h"
#include "HeadData.h"
@ -133,6 +137,7 @@ namespace AvatarDataPacket {
const HasFlags PACKET_HAS_AVATAR_LOCAL_POSITION = 1U << 9;
const HasFlags PACKET_HAS_FACE_TRACKER_INFO = 1U << 10;
const HasFlags PACKET_HAS_JOINT_DATA = 1U << 11;
const size_t AVATAR_HAS_FLAGS_SIZE = 2;
// NOTE: AvatarDataPackets start with a uint16_t sequence number that is not reflected in the Header structure.
@ -304,6 +309,14 @@ public:
RateCounter<> jointDataRate;
};
class AvatarPriority {
public:
AvatarPriority(AvatarSharedPointer a, float p) : avatar(a), priority(p) {}
AvatarSharedPointer avatar;
float priority;
// NOTE: std::priority_queue is a max-heap, so this natural less-than sorts high priorities to the front
bool operator<(const AvatarPriority& other) const { return priority < other.priority; }
};
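
Because std::priority_queue is a max-heap, that plain less-than is all it takes for the highest priority to surface first. A tiny sketch (avatarA/avatarB assumed in scope):

// Sketch: the highest-priority avatar comes off the queue first.
std::priority_queue<AvatarPriority> queue;
queue.push(AvatarPriority(avatarA, 0.2f));
queue.push(AvatarPriority(avatarB, 0.9f));
assert(queue.top().priority == 0.9f); // avatarB wins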
class AvatarData : public QObject, public SpatiallyNestable {
Q_OBJECT
@ -362,6 +375,7 @@ public:
void setHandPosition(const glm::vec3& handPosition);
typedef enum {
NoData,
MinimumData,
CullSmallData,
IncludeSmallData,
@ -539,8 +553,6 @@ public:
void setOwningAvatarMixer(const QWeakPointer<Node>& owningAvatarMixer) { _owningAvatarMixer = owningAvatarMixer; }
const AABox& getLocalAABox() const { return _localAABox; }
int getUsecsSinceLastUpdate() const { return _averageBytesReceived.getUsecsSinceLastEvent(); }
int getAverageBytesReceivedPerSecond() const;
int getReceiveRate() const;
@ -578,6 +590,28 @@ public:
}
bool shouldDie() const {
const qint64 AVATAR_SILENCE_THRESHOLD_USECS = 5 * USECS_PER_SECOND;
return _owningAvatarMixer.isNull() || getUsecsSinceLastUpdate() > AVATAR_SILENCE_THRESHOLD_USECS;
}
static const float OUT_OF_VIEW_PENALTY;
static std::priority_queue<AvatarPriority> sortAvatars(
QList<AvatarSharedPointer> avatarList,
const ViewFrustum& cameraView,
std::function<uint64_t(AvatarSharedPointer)> getLastUpdated,
std::function<float(AvatarSharedPointer)> getBoundingRadius,
std::function<bool(AvatarSharedPointer)> shouldIgnore);
// TODO: remove this HACK once we settle on optimal sort coefficients
// These coefficients are exposed for fine-tuning the sort priority for transferring new _jointData to the render pipeline.
static float _avatarSortCoefficientSize;
static float _avatarSortCoefficientCenter;
static float _avatarSortCoefficientAge;
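// Worked comparison with the defaults (0.5 / 0.25 / 1.0; illustrative numbers):
//   big & fresh avatar:   0.5*1.0 + 0.25*1.0 + 1.0*0.0 = 0.75
//   small & stale avatar: 0.5*0.1 + 0.25*0.5 + 1.0*1.0 = 1.175
// so one second of staleness outweighs a large apparent size.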
public slots:
void sendAvatarDataPacket();
void sendIdentityPacket();
@ -659,8 +693,6 @@ protected:
glm::vec3 _targetVelocity;
AABox _localAABox;
SimpleMovingAverage _averageBytesReceived;
// During recording, this holds the starting position, orientation & scale of the recorded avatar
@ -816,6 +848,11 @@ Q_DECLARE_METATYPE(RayToAvatarIntersectionResult)
QScriptValue RayToAvatarIntersectionResultToScriptValue(QScriptEngine* engine, const RayToAvatarIntersectionResult& results);
void RayToAvatarIntersectionResultFromScriptValue(const QScriptValue& object, RayToAvatarIntersectionResult& results);
Q_DECLARE_METATYPE(AvatarEntityMap)
QScriptValue AvatarEntityMapToScriptValue(QScriptEngine* engine, const AvatarEntityMap& value);
void AvatarEntityMapFromScriptValue(const QScriptValue& object, AvatarEntityMap& value);
// faux joint indexes (-1 means invalid)
const int SENSOR_TO_WORLD_MATRIX_INDEX = 65534; // -2
const int CONTROLLER_RIGHTHAND_INDEX = 65533; // -3

View file

@ -190,3 +190,4 @@ void AvatarHashMap::sessionUUIDChanged(const QUuid& sessionUUID, const QUuid& ol
_lastOwnerSessionUUID = oldUUID;
emit avatarSessionChangedEvent(sessionUUID, oldUUID);
}

View file

@ -27,7 +27,6 @@
#include "AvatarData.h"
class AvatarHashMap : public QObject, public Dependency {
Q_OBJECT
SINGLETON_DEPENDENCY

View file

@ -112,7 +112,8 @@ public:
ReloadEntityServerScript,
EntityPhysics,
EntityServerScriptLog,
LAST_PACKET_TYPE = EntityServerScriptLog
AdjustAvatarSorting,
LAST_PACKET_TYPE = AdjustAvatarSorting
};
};
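
New types are appended at the end and LAST_PACKET_TYPE is re-pointed at the newest entry, presumably so code that enumerates every packet type can stop there. A hedged guard sketch (the static_assert is illustrative, not part of this change, and the exact enum scoping may differ):

// Sketch: keep LAST_PACKET_TYPE honest whenever a value is appended.
static_assert(PacketType::LAST_PACKET_TYPE == PacketType::AdjustAvatarSorting,
              "re-point LAST_PACKET_TYPE when appending new packet types");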

View file

@ -56,6 +56,11 @@ bool RecordingScriptingInterface::loadRecording(const QString& url) {
using namespace recording;
auto loader = ClipCache::instance().getClipLoader(url);
if (!loader) {
qWarning() << "Clip failed to load from " << url;
return false;
}
if (!loader->isLoaded()) {
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
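
The QEventLoop here is the standard Qt idiom for waiting on an asynchronous signal without blocking event processing. A condensed sketch of the pattern, completing what the hunk truncates:

// Sketch: block until the loader finishes, while still pumping Qt events.
QEventLoop loop;
QObject::connect(loader.data(), &Resource::loaded, &loop, &QEventLoop::quit);
loop.exec(); // returns once the signal fires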

View file

@ -576,6 +576,7 @@ void ScriptEngine::init() {
qScriptRegisterMetaType(this, EntityItemIDtoScriptValue, EntityItemIDfromScriptValue);
qScriptRegisterMetaType(this, RayToEntityIntersectionResultToScriptValue, RayToEntityIntersectionResultFromScriptValue);
qScriptRegisterMetaType(this, RayToAvatarIntersectionResultToScriptValue, RayToAvatarIntersectionResultFromScriptValue);
qScriptRegisterMetaType(this, AvatarEntityMapToScriptValue, AvatarEntityMapFromScriptValue);
qScriptRegisterSequenceMetaType<QVector<QUuid>>(this);
qScriptRegisterSequenceMetaType<QVector<EntityItemID>>(this);
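
Registering the converter pair is what lets AvatarEntityMap cross the C++/script boundary; the same pattern works for any metatype (MyType and its converters below are illustrative):

// Sketch: signatures Qt Script expects for a custom type's converters.
// (MyType must also be declared with Q_DECLARE_METATYPE.)
QScriptValue MyTypeToScriptValue(QScriptEngine* engine, const MyType& value);
void MyTypeFromScriptValue(const QScriptValue& object, MyType& value);
// ... then, during engine init:
qScriptRegisterMetaType(engine, MyTypeToScriptValue, MyTypeFromScriptValue);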

View file

@ -58,11 +58,14 @@ function updateOverlays() {
// setup a position for the overlay that is just above this avatar's head
var overlayPosition = avatar.getJointPosition("Head");
overlayPosition.y += 1.05;
overlayPosition.y += 1.15;
var rows = 8;
var text = avatarID + "\n"
+"--- Data from Mixer ---\n"
+"All: " + AvatarManager.getAvatarDataRate(avatarID).toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID).toFixed(2) + "hz)" + "\n"
/*
+" GP: " + AvatarManager.getAvatarDataRate(avatarID,"globalPosition").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"globalPosition").toFixed(2) + "hz)" + "\n"
+" LP: " + AvatarManager.getAvatarDataRate(avatarID,"localPosition").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"localPosition").toFixed(2) + "hz)" + "\n"
+" BB: " + AvatarManager.getAvatarDataRate(avatarID,"avatarBoundingBox").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"avatarBoundingBox").toFixed(2) + "hz)" + "\n"
@ -74,11 +77,12 @@ function updateOverlays() {
+" AF: " + AvatarManager.getAvatarDataRate(avatarID,"additionalFlags").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"additionalFlags").toFixed(2) + "hz)" + "\n"
+" PI: " + AvatarManager.getAvatarDataRate(avatarID,"parentInfo").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"parentInfo").toFixed(2) + "hz)" + "\n"
+" FT: " + AvatarManager.getAvatarDataRate(avatarID,"faceTracker").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"faceTracker").toFixed(2) + "hz)" + "\n"
*/
+" JD: " + AvatarManager.getAvatarDataRate(avatarID,"jointData").toFixed(2) + "kbps (" + AvatarManager.getAvatarUpdateRate(avatarID,"jointData").toFixed(2) + "hz)" + "\n"
+"--- Simulation ---\n"
+"All: " + AvatarManager.getAvatarSimulationRate(avatarID,"avatar").toFixed(2) + "hz \n"
+" inView: " + AvatarManager.getAvatarSimulationRate(avatarID,"avatarInView").toFixed(2) + "hz \n"
+" SM: " + AvatarManager.getAvatarSimulationRate(avatarID,"skeletonModel").toFixed(2) + "hz \n"
//+" SM: " + AvatarManager.getAvatarSimulationRate(avatarID,"skeletonModel").toFixed(2) + "hz \n"
+" JD: " + AvatarManager.getAvatarSimulationRate(avatarID,"jointData").toFixed(2) + "hz \n"
if (avatarID in debugOverlays) {
@ -93,7 +97,7 @@ function updateOverlays() {
position: overlayPosition,
dimensions: {
x: 1.25,
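// one 0.13 m slot per text row; with rows = 8 the overlay is about 1.04 m tall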
y: 19 * 0.13
y: rows * 0.13
},
lineHeight: 0.1,
font:{size:0.1},

View file

@ -12,14 +12,14 @@
HIFI_PUBLIC_BUCKET = "http://s3.amazonaws.com/hifi-public/";
Script.include("/~/system/libraries/toolBars.js");
var recordingFile = "recording.rec";
var recordingFile = "recording.hfr";
function setPlayerOptions() {
Recording.setPlayFromCurrentLocation(true);
Recording.setPlayerUseDisplayName(false);
Recording.setPlayerUseAttachments(false);
Recording.setPlayerUseHeadModel(false);
Recording.setPlayerUseSkeletonModel(false);
Recording.setPlayerUseSkeletonModel(true);
}
var windowDimensions = Controller.getViewportDimensions();
@ -142,7 +142,6 @@ function setupTimer() {
backgroundAlpha: 1.0,
visible: true
});
}
function updateTimer() {
@ -272,7 +271,7 @@ function mousePressEvent(event) {
}
} else if (loadIcon === toolBar.clicked(clickedOverlay)) {
if (!Recording.isRecording() && !Recording.isPlaying()) {
recordingFile = Window.browse("Load recorcding from file", ".", "Recordings (*.hfr *.rec *.HFR *.REC)");
recordingFile = Window.browse("Load recording from file", ".", "Recordings (*.hfr *.rec *.HFR *.REC)");
if (!(recordingFile === "null" || recordingFile === null || recordingFile === "")) {
Recording.loadRecording(recordingFile);
}
@ -345,5 +344,3 @@ Script.scriptEnding.connect(scriptEnding);
// Should be called last to put everything into position
moveUI();