Merge branch 'master' of github.com:highfidelity/hifi into tablet-ui-edit-js

Seth Alves 2017-02-22 12:07:31 -08:00
commit a6b4f07674
18 changed files with 1548 additions and 635 deletions


@@ -434,8 +434,16 @@ void Agent::executeScript() {
connect(&_avatarAudioTimerThread, &QThread::finished, audioTimerWorker, &QObject::deleteLater);
_avatarAudioTimerThread.start();
// 60Hz timer for avatar
QObject::connect(_scriptEngine.get(), &ScriptEngine::update, this, &Agent::processAgentAvatar);
// Agents should run at 45hz
static const int AVATAR_DATA_HZ = 45;
static const int AVATAR_DATA_IN_MSECS = MSECS_PER_SECOND / AVATAR_DATA_HZ;
QTimer* avatarDataTimer = new QTimer(this);
connect(avatarDataTimer, &QTimer::timeout, this, &Agent::processAgentAvatar);
avatarDataTimer->setSingleShot(false);
avatarDataTimer->setInterval(AVATAR_DATA_IN_MSECS);
avatarDataTimer->setTimerType(Qt::PreciseTimer);
avatarDataTimer->start();
_scriptEngine->run();
Frame::clearFrameHandler(AUDIO_FRAME_TYPE);
@@ -538,7 +546,7 @@ void Agent::processAgentAvatar() {
auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();
AvatarData::AvatarDataDetail dataDetail = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO) ? AvatarData::SendAllData : AvatarData::CullSmallData;
QByteArray avatarByteArray = scriptedAvatar->toByteArray(dataDetail, 0, scriptedAvatar->getLastSentJointData());
QByteArray avatarByteArray = scriptedAvatar->toByteArrayStateful(dataDetail);
scriptedAvatar->doneEncoding(true);
static AvatarDataSequenceNumber sequenceNumber = 0;
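
For context on the new timer above: MSECS_PER_SECOND / AVATAR_DATA_HZ is 1000 / 45, which truncates to 22 ms, so the timer actually fires at roughly 45.45 Hz; Qt::PreciseTimer opts the timer out of the ~5% slack Qt's default coarse timers are allowed. A minimal standalone reduction of the pattern (editor's sketch, not part of the commit):

QTimer* timer = new QTimer(this);
timer->setTimerType(Qt::PreciseTimer);                  // coarse timers may drift ~5%
timer->setInterval(MSECS_PER_SECOND / AVATAR_DATA_HZ);  // integer division: 22 ms
connect(timer, &QTimer::timeout, this, &Agent::processAgentAvatar);
timer->start();                                         // QTimer repeats by default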

File diff suppressed because it is too large


@@ -21,6 +21,8 @@
#include <ThreadedAssignment.h>
#include "AvatarMixerClientData.h"
#include "AvatarMixerSlavePool.h"
/// Handles assignments of type AvatarMixer - distribution of avatar data to various clients
class AvatarMixer : public ThreadedAssignment {
Q_OBJECT
@@ -36,8 +38,8 @@ public slots:
void sendStatsPacket() override;
private slots:
void queueIncomingPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer node);
void handleViewFrustumPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleAvatarDataPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleAvatarIdentityPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void handleKillAvatarPacket(QSharedPointer<ReceivedMessage> message);
void handleNodeIgnoreRequestPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
@@ -45,22 +47,29 @@ private slots:
void handleRequestsDomainListDataPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer senderNode);
void domainSettingsRequestComplete();
void handlePacketVersionMismatch(PacketType type, const HifiSockAddr& senderSockAddr, const QUuid& senderUUID);
void start();
private:
void broadcastAvatarData();
AvatarMixerClientData* getOrCreateClientData(SharedNodePointer node);
std::chrono::microseconds timeFrame(p_high_resolution_clock::time_point& timestamp);
void throttle(std::chrono::microseconds duration, int frame);
void parseDomainServerSettings(const QJsonObject& domainSettings);
void sendIdentityPacket(AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode);
QThread _broadcastThread;
void manageDisplayName(const SharedNodePointer& node);
p_high_resolution_clock::time_point _lastFrameTimestamp;
float _trailingSleepRatio { 1.0f };
float _performanceThrottlingRatio { 0.0f };
// FIXME - new throttling - use these values somehow
float _trailingMixRatio { 0.0f };
float _throttlingRatio { 0.0f };
int _sumListeners { 0 };
int _numStatFrames { 0 };
int _numTightLoopFrames { 0 };
int _sumIdentityPackets { 0 };
float _maxKbpsPerNode = 0.0f;
@@ -68,11 +77,40 @@ private:
float _domainMinimumScale { MIN_AVATAR_SCALE };
float _domainMaximumScale { MAX_AVATAR_SCALE };
QTimer* _broadcastTimer = nullptr;
RateCounter<> _broadcastRate;
p_high_resolution_clock::time_point _lastDebugMessage;
QHash<QString, QPair<int, int>> _sessionDisplayNames;
quint64 _displayNameManagementElapsedTime { 0 }; // total time spent in broadcastAvatarData/display name management... since last stats window
quint64 _ignoreCalculationElapsedTime { 0 };
quint64 _avatarDataPackingElapsedTime { 0 };
quint64 _packetSendingElapsedTime { 0 };
quint64 _broadcastAvatarDataElapsedTime { 0 }; // total time spent in broadcastAvatarData since last stats window
quint64 _broadcastAvatarDataInner { 0 };
quint64 _broadcastAvatarDataLockWait { 0 };
quint64 _broadcastAvatarDataNodeTransform { 0 };
quint64 _broadcastAvatarDataNodeFunctor { 0 };
quint64 _handleViewFrustumPacketElapsedTime { 0 };
quint64 _handleAvatarIdentityPacketElapsedTime { 0 };
quint64 _handleKillAvatarPacketElapsedTime { 0 };
quint64 _handleNodeIgnoreRequestPacketElapsedTime { 0 };
quint64 _handleRadiusIgnoreRequestPacketElapsedTime { 0 };
quint64 _handleRequestsDomainListDataPacketElapsedTime { 0 };
quint64 _processQueuedAvatarDataPacketsElapsedTime { 0 };
quint64 _processQueuedAvatarDataPacketsLockWaitElapsedTime { 0 };
quint64 _processEventsElapsedTime { 0 };
quint64 _sendStatsElapsedTime { 0 };
quint64 _queueIncomingPacketElapsedTime { 0 };
quint64 _lastStatsTime { usecTimestampNow() };
RateCounter<> _loopRate; // the rate at which the main thread's tight loop runs
AvatarMixerSlavePool _slavePool;
};
#endif // hifi_AvatarMixer_h


@@ -16,10 +16,52 @@
#include "AvatarMixerClientData.h"
void AvatarMixerClientData::queuePacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer node) {
if (!_packetQueue.node) {
_packetQueue.node = node;
}
_packetQueue.push(message);
}
int AvatarMixerClientData::processPackets() {
int packetsProcessed = 0;
SharedNodePointer node = _packetQueue.node;
assert(_packetQueue.empty() || node);
_packetQueue.node.clear();
while (!_packetQueue.empty()) {
auto& packet = _packetQueue.front();
packetsProcessed++;
switch (packet->getType()) {
case PacketType::AvatarData:
parseData(*packet);
break;
default:
Q_UNREACHABLE();
}
_packetQueue.pop();
}
assert(_packetQueue.empty());
return packetsProcessed;
}
int AvatarMixerClientData::parseData(ReceivedMessage& message) {
// pull the sequence number from the data first
message.readPrimitive(&_lastReceivedSequenceNumber);
uint16_t sequenceNumber;
message.readPrimitive(&sequenceNumber);
if (sequenceNumber < _lastReceivedSequenceNumber && _lastReceivedSequenceNumber != UINT16_MAX) {
incrementNumOutOfOrderSends();
}
_lastReceivedSequenceNumber = sequenceNumber;
// compute the offset to the data payload
return _avatar->parseDataFromBuffer(message.readWithoutCopy(message.getBytesLeftToRead()));
}
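
A quick illustration of the sequence check above (editor's example, hypothetical values):

// last = 100,   next = 101 -> in order, accepted
// last = 101,   next = 100 -> out-of-order, counted
// last = 65535, next = 0   -> uint16_t wrap-around, deliberately not counted
// Note the guard only special-cases a previous value of exactly UINT16_MAX;
// an out-of-order arrival across the wrap (last = 0, next = 65535) slips past.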


@@ -16,6 +16,7 @@
#include <cfloat>
#include <unordered_map>
#include <unordered_set>
#include <queue>
#include <QtCore/QJsonObject>
#include <QtCore/QUrl>
@@ -41,6 +42,7 @@ public:
int parseData(ReceivedMessage& message) override;
AvatarData& getAvatar() { return *_avatar; }
const AvatarData* getConstAvatarData() const { return _avatar.get(); }
bool checkAndSetHasReceivedFirstPacketsFrom(const QUuid& uuid);
@@ -85,9 +87,9 @@ public:
void loadJSONStats(QJsonObject& jsonObject) const;
glm::vec3 getPosition() { return _avatar ? _avatar->getPosition() : glm::vec3(0); }
glm::vec3 getGlobalBoundingBoxCorner() { return _avatar ? _avatar->getGlobalBoundingBoxCorner() : glm::vec3(0); }
bool isRadiusIgnoring(const QUuid& other) { return _radiusIgnoredOthers.find(other) != _radiusIgnoredOthers.end(); }
glm::vec3 getPosition() const { return _avatar ? _avatar->getPosition() : glm::vec3(0); }
glm::vec3 getGlobalBoundingBoxCorner() const { return _avatar ? _avatar->getGlobalBoundingBoxCorner() : glm::vec3(0); }
bool isRadiusIgnoring(const QUuid& other) const { return _radiusIgnoredOthers.find(other) != _radiusIgnoredOthers.end(); }
void addToRadiusIgnoringSet(const QUuid& other) { _radiusIgnoredOthers.insert(other); }
void removeFromRadiusIgnoringSet(SharedNodePointer self, const QUuid& other);
void ignoreOther(SharedNodePointer self, SharedNodePointer other);
@@ -118,9 +120,15 @@ public:
return _lastOtherAvatarSentJoints[otherAvatar];
}
void queuePacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer node);
int processPackets(); // returns number of packets processed
private:
struct PacketQueue : public std::queue<QSharedPointer<ReceivedMessage>> {
QWeakPointer<Node> node;
};
PacketQueue _packetQueue;
AvatarSharedPointer _avatar { new AvatarData() };
uint16_t _lastReceivedSequenceNumber { 0 };
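
Tying the queue API above together, the intended producer/consumer flow is roughly this (editor's sketch; the call sites match the slots and methods declared in this diff):

// on the mixer's packet-receive path (AvatarMixer::queueIncomingPacket):
clientData->queuePacket(message, node);

// later, on an AvatarMixerSlave worker (processIncomingPackets):
int processed = clientData->processPackets();   // drains the queue via parseData()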


@@ -0,0 +1,443 @@
//
// AvatarMixerSlave.cpp
// assignment-client/src/avatar
//
// Created by Brad Hefta-Gaub on 2/14/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <algorithm>
#include <random>
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <AvatarLogging.h>
#include <LogHandler.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Node.h>
#include <OctreeConstants.h>
#include <udt/PacketHeaders.h>
#include <SharedUtil.h>
#include <StDev.h>
#include <UUID.h>
#include "AvatarMixer.h"
#include "AvatarMixerClientData.h"
#include "AvatarMixerSlave.h"
void AvatarMixerSlave::configure(ConstIter begin, ConstIter end) {
_begin = begin;
_end = end;
}
void AvatarMixerSlave::configureBroadcast(ConstIter begin, ConstIter end,
p_high_resolution_clock::time_point lastFrameTimestamp,
float maxKbpsPerNode, float throttlingRatio) {
_begin = begin;
_end = end;
_lastFrameTimestamp = lastFrameTimestamp;
_maxKbpsPerNode = maxKbpsPerNode;
_throttlingRatio = throttlingRatio;
}
void AvatarMixerSlave::harvestStats(AvatarMixerSlaveStats& stats) {
stats = _stats;
_stats.reset();
}
void AvatarMixerSlave::processIncomingPackets(const SharedNodePointer& node) {
auto start = usecTimestampNow();
auto nodeData = dynamic_cast<AvatarMixerClientData*>(node->getLinkedData());
if (nodeData) {
_stats.nodesProcessed++;
_stats.packetsProcessed += nodeData->processPackets();
}
auto end = usecTimestampNow();
_stats.processIncomingPacketsElapsedTime += (end - start);
}
void AvatarMixerSlave::sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode) {
QByteArray individualData = nodeData->getConstAvatarData()->identityByteArray();
auto identityPacket = NLPacket::create(PacketType::AvatarIdentity, individualData.size());
individualData.replace(0, NUM_BYTES_RFC4122_UUID, nodeData->getNodeID().toRfc4122());
identityPacket->write(individualData);
DependencyManager::get<NodeList>()->sendPacket(std::move(identityPacket), *destinationNode);
_stats.numIdentityPackets++;
}
static const int AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND = 45;
// only send extra avatar data (avatars out of view, ignored) every Nth AvatarData frame
// Extra avatar data will be sent (AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND/EXTRA_AVATAR_DATA_FRAME_RATIO) times
// per second.
// This value should be a power of two for performance purposes, as the mixer performs a modulo operation every frame
// to determine whether the extra data should be sent.
static const int EXTRA_AVATAR_DATA_FRAME_RATIO = 16;
// FIXME - There is some old logic (unchanged as of 2/17/17) that randomly decides to send an identity
// packet. That logic had the following comment about the constants it uses...
//
// An 80% chance of sending an identity packet within a 5 second interval,
// assuming a 60 Hz update rate.
//
// Assuming that calculation is in fact correct for 80% at 60 Hz over 5 seconds (an assumption
// I have not verified), the constant is definitely wrong now, since we send at 45 Hz.
const float IDENTITY_SEND_PROBABILITY = 1.0f / 187.0f;
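// Editor's check of the quoted math (not part of the commit): the chance of
// at least one identity send in 5 seconds is 1 - (1 - p)^frames. With
// p = 1/187 and 60 Hz (300 frames), 1 - (186/187)^300 ~= 0.80, so the constant
// does match the old 80% claim; at the current 45 Hz (225 frames) it yields
// only ~0.70, which is exactly the drift the FIXME describes.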
void AvatarMixerSlave::broadcastAvatarData(const SharedNodePointer& node) {
quint64 start = usecTimestampNow();
auto nodeList = DependencyManager::get<NodeList>();
// setup for distributed random floating point values
std::random_device randomDevice;
std::mt19937 generator(randomDevice());
std::uniform_real_distribution<float> distribution;
if (node->getLinkedData() && (node->getType() == NodeType::Agent) && node->getActiveSocket()) {
_stats.nodesBroadcastedTo++;
AvatarMixerClientData* nodeData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData());
nodeData->resetInViewStats();
const AvatarData& avatar = nodeData->getAvatar();
glm::vec3 myPosition = avatar.getClientGlobalPosition();
// reset the internal state for correct random number distribution
distribution.reset();
// reset the max distance for this frame
float maxAvatarDistanceThisFrame = 0.0f;
// reset the number of sent avatars
nodeData->resetNumAvatarsSentLastFrame();
// keep a counter of the number of considered avatars
int numOtherAvatars = 0;
// keep track of outbound data rate specifically for avatar data
int numAvatarDataBytes = 0;
// keep track of the number of other avatars held back in this frame
int numAvatarsHeldBack = 0;
// keep track of the number of other avatar frames skipped
int numAvatarsWithSkippedFrames = 0;
// use the data rate specifically for avatar data for FRD adjustment checks
float avatarDataRateLastSecond = nodeData->getOutboundAvatarDataKbps();
// When this is true, the AvatarMixer will send Avatar data to a client about avatars that are not in the view frustum
bool getsOutOfView = nodeData->getRequestsDomainListData();
// When this is true, the AvatarMixer will send Avatar data to a client about avatars that they've ignored
bool getsIgnoredByMe = getsOutOfView;
// When this is true, the AvatarMixer will send Avatar data to a client about avatars that have ignored them
bool getsAnyIgnored = getsIgnoredByMe && node->getCanKick();
// Check if it is time to adjust what we send this client based on the observed
// bandwidth to this node. We do this once a second, which is also the window for
// the bandwidth reported by node->getOutboundBandwidth();
if (nodeData->getNumFramesSinceFRDAdjustment() > AVATAR_MIXER_BROADCAST_FRAMES_PER_SECOND) {
const float FRD_ADJUSTMENT_ACCEPTABLE_RATIO = 0.8f;
const float HYSTERESIS_GAP = (1 - FRD_ADJUSTMENT_ACCEPTABLE_RATIO);
const float HYSTERESIS_MIDDLE_PERCENTAGE = (1 - (HYSTERESIS_GAP * 0.5f));
// get the current full rate distance so we can work with it
float currentFullRateDistance = nodeData->getFullRateDistance();
if (avatarDataRateLastSecond > _maxKbpsPerNode) {
// is the FRD greater than the farthest avatar?
// if so, before we calculate anything, set it to that distance
currentFullRateDistance = std::min(currentFullRateDistance, nodeData->getMaxAvatarDistance());
// we're adjusting the full rate distance to target a bandwidth in the middle
// of the hysteresis gap
currentFullRateDistance *= (_maxKbpsPerNode * HYSTERESIS_MIDDLE_PERCENTAGE) / avatarDataRateLastSecond;
nodeData->setFullRateDistance(currentFullRateDistance);
nodeData->resetNumFramesSinceFRDAdjustment();
} else if (currentFullRateDistance < nodeData->getMaxAvatarDistance()
&& avatarDataRateLastSecond < _maxKbpsPerNode * FRD_ADJUSTMENT_ACCEPTABLE_RATIO) {
// we are constrained AND we've recovered to below the acceptable ratio
// let's adjust the full rate distance to target a bandwidth in the middle of the hysteresis gap
currentFullRateDistance *= (_maxKbpsPerNode * HYSTERESIS_MIDDLE_PERCENTAGE) / avatarDataRateLastSecond;
nodeData->setFullRateDistance(currentFullRateDistance);
nodeData->resetNumFramesSinceFRDAdjustment();
}
} else {
nodeData->incrementNumFramesSinceFRDAdjustment();
}
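// Worked example (editor's note, not part of the commit): with
// _maxKbpsPerNode = 100, avatarDataRateLastSecond = 160, and a current FRD of
// 20 m, the middle of the 80-100 kbps hysteresis gap is 100 * 0.9 = 90 kbps,
// so the new FRD becomes 20 * 90 / 160 = 11.25 m. The recovery branch uses the
// same formula, scaling the FRD back up toward the 90 kbps target.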
// setup a PacketList for the avatarPackets
auto avatarPacketList = NLPacketList::create(PacketType::BulkAvatarData);
// this is an AGENT we have received head data from
// send back a packet with other active node data to this node
std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode) {
bool shouldConsider = false;
quint64 startIgnoreCalculation = usecTimestampNow();
// make sure we have data for this avatar, that it isn't the same node,
// and isn't an avatar that the viewing node has ignored
// or that has ignored the viewing node
if (!otherNode->getLinkedData()
|| otherNode->getUUID() == node->getUUID()
|| (node->isIgnoringNodeWithID(otherNode->getUUID()) && !getsIgnoredByMe)
|| (otherNode->isIgnoringNodeWithID(node->getUUID()) && !getsAnyIgnored)) {
shouldConsider = false;
} else {
const AvatarMixerClientData* otherData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
shouldConsider = true; // assume we will consider...
// Check to see if the space bubble is enabled
if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
// Define the minimum bubble size
static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
// Define the scale of the box for the current node
glm::vec3 nodeBoxScale = (nodeData->getPosition() - nodeData->getGlobalBoundingBoxCorner()) * 2.0f;
// Define the scale of the box for the current other node
glm::vec3 otherNodeBoxScale = (otherData->getPosition() - otherData->getGlobalBoundingBoxCorner()) * 2.0f;
// Set up the bounding box for the current node
AABox nodeBox(nodeData->getGlobalBoundingBoxCorner(), nodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(nodeBoxScale, minBubbleSize))) {
nodeBox.setScaleStayCentered(minBubbleSize);
}
// Set up the bounding box for the current other node
AABox otherNodeBox(otherData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(otherNodeBoxScale, minBubbleSize))) {
otherNodeBox.setScaleStayCentered(minBubbleSize);
}
// Quadruple the scale of both bounding boxes
nodeBox.embiggen(4.0f);
otherNodeBox.embiggen(4.0f);
// Perform the collision check between the two bounding boxes
if (nodeBox.touches(otherNodeBox)) {
nodeData->ignoreOther(node, otherNode);
shouldConsider = getsAnyIgnored;
}
}
// Not close enough to ignore
if (shouldConsider) {
nodeData->removeFromRadiusIgnoringSet(node, otherNode->getUUID());
}
quint64 endIgnoreCalculation = usecTimestampNow();
_stats.ignoreCalculationElapsedTime += (endIgnoreCalculation - startIgnoreCalculation);
}
if (shouldConsider) {
quint64 startAvatarDataPacking = usecTimestampNow();
++numOtherAvatars;
const AvatarMixerClientData* otherNodeData = reinterpret_cast<const AvatarMixerClientData*>(otherNode->getLinkedData());
// make sure we send out identity packets to and from new arrivals.
bool forceSend = !nodeData->checkAndSetHasReceivedFirstPacketsFrom(otherNode->getUUID());
// FIXME - this clause seems suspicious "... || otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp ..."
if (otherNodeData->getIdentityChangeTimestamp().time_since_epoch().count() > 0
&& (forceSend
|| otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp
|| distribution(generator) < IDENTITY_SEND_PROBABILITY)) {
sendIdentityPacket(otherNodeData, node);
}
const AvatarData* otherAvatar = otherNodeData->getConstAvatarData();
// Decide whether to send this avatar's data based on its distance from us
// The full rate distance is the distance at which EVERY update will be sent for this avatar
// at twice the full rate distance, there will be a 50% chance of sending this avatar's update
glm::vec3 otherPosition = otherAvatar->getClientGlobalPosition();
float distanceToAvatar = glm::length(myPosition - otherPosition);
// potentially update the max full rate distance for this frame
maxAvatarDistanceThisFrame = std::max(maxAvatarDistanceThisFrame, distanceToAvatar);
// This code handles the random dropping of avatar data based on the ratio of
// "getFullRateDistance" to actual distance.
//
// NOTE: If the receiving node is in "PAL mode" it is asked to get even things that
// are out of view, which also disables this random distribution.
if (distanceToAvatar != 0.0f
&& !getsOutOfView
&& distribution(generator) > (nodeData->getFullRateDistance() / distanceToAvatar)) {
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
}
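// Editor's note (not part of the commit): the effective send probability here
// is min(1, fullRateDistance / distanceToAvatar). With a 10 m full rate
// distance, an avatar at <= 10 m is always sent, one at 20 m is sent about
// 50% of the time, and one at 40 m about 25% of the time, matching the
// comment above.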
if (shouldConsider) {
AvatarDataSequenceNumber lastSeqToReceiver = nodeData->getLastBroadcastSequenceNumber(otherNode->getUUID());
AvatarDataSequenceNumber lastSeqFromSender = otherNodeData->getLastReceivedSequenceNumber();
// FIXME - This code does appear to be working, but it seems brittle.
// It determines whether the frame of data for this "other"
// avatar has already been sent to the receiver. This has been
// verified to work on a desktop display that renders at 60 Hz and
// therefore sends to the mixer at 30 Hz. Each second you'd expect
// 15 (45 Hz - 30 Hz) duplicate frames, and the stat
// avg_other_av_skips_per_second does indeed report 15.
//
// make sure we haven't already sent this frame of data from this sender to this receiver
if (lastSeqToReceiver == lastSeqFromSender && lastSeqToReceiver != 0) {
++numAvatarsHeldBack;
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
} else if (lastSeqFromSender - lastSeqToReceiver > 1) {
// this is a skip - we still send the packet but capture the presence of the skip so we see it happening
++numAvatarsWithSkippedFrames;
}
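// Editor's note (not part of the commit): "held back" (equal sequence
// numbers) means the receiver already has this exact frame, so nothing is
// sent; a "skip" (a gap larger than one between sender and receiver sequence
// numbers) is still sent, and is only counted so the stats expose it.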
// we're going to send this avatar
if (shouldConsider) {
// determine if avatar is in view, to determine how much data to include...
glm::vec3 otherNodeBoxScale = (otherPosition - otherNodeData->getGlobalBoundingBoxCorner()) * 2.0f;
AABox otherNodeBox(otherNodeData->getGlobalBoundingBoxCorner(), otherNodeBoxScale);
bool isInView = nodeData->otherAvatarInView(otherNodeBox);
// this throttles the extra data to only be sent every Nth message
if (!isInView && !getsOutOfView && (lastSeqToReceiver % EXTRA_AVATAR_DATA_FRAME_RATIO > 0)) {
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
shouldConsider = false;
}
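// Editor's note (not part of the commit): with EXTRA_AVATAR_DATA_FRAME_RATIO
// of 16 and 45 broadcast frames per second, an out-of-view avatar passes this
// modulo test only when lastSeqToReceiver % 16 == 0, i.e. roughly
// 45 / 16 ~= 2.8 updates per second.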
if (shouldConsider) {
// start a new segment in the PacketList for this avatar
avatarPacketList->startSegment();
AvatarData::AvatarDataDetail detail;
if (!isInView && !getsOutOfView) {
detail = AvatarData::MinimumData;
nodeData->incrementAvatarOutOfView();
} else {
detail = distribution(generator) < AVATAR_SEND_FULL_UPDATE_RATIO
? AvatarData::SendAllData : AvatarData::CullSmallData;
nodeData->incrementAvatarInView();
}
{
bool includeThisAvatar = true;
auto lastEncodeForOther = nodeData->getLastOtherAvatarEncodeTime(otherNode->getUUID());
QVector<JointData>& lastSentJointsForOther = nodeData->getLastOtherAvatarSentJoints(otherNode->getUUID());
bool distanceAdjust = true;
glm::vec3 viewerPosition = myPosition;
AvatarDataPacket::HasFlags hasFlagsOut; // the result of the toByteArray
bool dropFaceTracking = false;
quint64 start = usecTimestampNow();
QByteArray bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
quint64 end = usecTimestampNow();
_stats.toByteArrayElapsedTime += (end - start);
static const int MAX_ALLOWED_AVATAR_DATA = (1400 - NUM_BYTES_RFC4122_UUID);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() resulted in very large buffer:" << bytes.size() << "... attempt to drop facial data";
dropFaceTracking = true; // first try dropping the facial data
bytes = otherAvatar->toByteArray(detail, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() without facial data resulted in very large buffer:" << bytes.size() << "... reduce to MinimumData";
bytes = otherAvatar->toByteArray(AvatarData::MinimumData, lastEncodeForOther, lastSentJointsForOther,
hasFlagsOut, dropFaceTracking, distanceAdjust, viewerPosition, &lastSentJointsForOther);
}
if (bytes.size() > MAX_ALLOWED_AVATAR_DATA) {
qCWarning(avatars) << "otherAvatar.toByteArray() MinimumData resulted in very large buffer:" << bytes.size() << "... FAIL!!";
includeThisAvatar = false;
}
}
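// Editor's note (not part of the commit): 1400 bytes appears to be a
// conservative per-packet payload budget (in the neighborhood of a typical
// 1500-byte Ethernet MTU less protocol headers), minus the 16-byte RFC 4122
// UUID written ahead of each avatar blob. The cascade above trades fidelity
// for size: drop face tracking first, then fall back to MinimumData, and only
// then exclude the avatar entirely.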
if (includeThisAvatar) {
numAvatarDataBytes += avatarPacketList->write(otherNode->getUUID().toRfc4122());
numAvatarDataBytes += avatarPacketList->write(bytes);
_stats.numOthersIncluded++;
// increment the number of avatars sent to this receiver
nodeData->incrementNumAvatarsSentLastFrame();
// set the last sent sequence number for this sender on the receiver
nodeData->setLastBroadcastSequenceNumber(otherNode->getUUID(),
otherNodeData->getLastReceivedSequenceNumber());
}
}
avatarPacketList->endSegment();
quint64 endAvatarDataPacking = usecTimestampNow();
_stats.avatarDataPackingElapsedTime += (endAvatarDataPacking - startAvatarDataPacking);
}
}
}
}
});
quint64 startPacketSending = usecTimestampNow();
// close the current packet so that we're always sending something
avatarPacketList->closeCurrentPacket(true);
_stats.numPacketsSent += (int)avatarPacketList->getNumPackets();
_stats.numBytesSent += numAvatarDataBytes;
// send the avatar data PacketList
nodeList->sendPacketList(std::move(avatarPacketList), *node);
// record the bytes sent for other avatar data in the AvatarMixerClientData
nodeData->recordSentAvatarData(numAvatarDataBytes);
// record the number of avatars held back this frame
nodeData->recordNumOtherAvatarStarves(numAvatarsHeldBack);
nodeData->recordNumOtherAvatarSkips(numAvatarsWithSkippedFrames);
if (numOtherAvatars == 0) {
// update the full rate distance to FLT_MAX since we didn't have any other avatars to send
nodeData->setMaxAvatarDistance(FLT_MAX);
} else {
nodeData->setMaxAvatarDistance(maxAvatarDistanceThisFrame);
}
quint64 endPacketSending = usecTimestampNow();
_stats.packetSendingElapsedTime += (endPacketSending - startPacketSending);
}
quint64 end = usecTimestampNow();
_stats.jobElapsedTime += (end - start);
}


@@ -0,0 +1,101 @@
//
// AvatarMixerSlave.h
// assignment-client/src/avatar
//
// Created by Brad Hefta-Gaub on 2/14/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AvatarMixerSlave_h
#define hifi_AvatarMixerSlave_h
class AvatarMixerClientData;
class AvatarMixerSlaveStats {
public:
int nodesProcessed { 0 };
int packetsProcessed { 0 };
quint64 processIncomingPacketsElapsedTime { 0 };
int nodesBroadcastedTo { 0 };
int numPacketsSent { 0 };
int numBytesSent { 0 };
int numIdentityPackets { 0 };
int numOthersIncluded { 0 };
quint64 ignoreCalculationElapsedTime { 0 };
quint64 avatarDataPackingElapsedTime { 0 };
quint64 packetSendingElapsedTime { 0 };
quint64 toByteArrayElapsedTime { 0 };
quint64 jobElapsedTime { 0 };
void reset() {
// receiving job stats
nodesProcessed = 0;
packetsProcessed = 0;
processIncomingPacketsElapsedTime = 0;
// sending job stats
nodesBroadcastedTo = 0;
numPacketsSent = 0;
numBytesSent = 0;
numIdentityPackets = 0;
numOthersIncluded = 0;
ignoreCalculationElapsedTime = 0;
avatarDataPackingElapsedTime = 0;
packetSendingElapsedTime = 0;
toByteArrayElapsedTime = 0;
jobElapsedTime = 0;
}
AvatarMixerSlaveStats& operator+=(const AvatarMixerSlaveStats& rhs) {
nodesProcessed += rhs.nodesProcessed;
packetsProcessed += rhs.packetsProcessed;
processIncomingPacketsElapsedTime += rhs.processIncomingPacketsElapsedTime;
nodesBroadcastedTo += rhs.nodesBroadcastedTo;
numPacketsSent += rhs.numPacketsSent;
numBytesSent += rhs.numBytesSent;
numIdentityPackets += rhs.numIdentityPackets;
numOthersIncluded += rhs.numOthersIncluded;
ignoreCalculationElapsedTime += rhs.ignoreCalculationElapsedTime;
avatarDataPackingElapsedTime += rhs.avatarDataPackingElapsedTime;
packetSendingElapsedTime += rhs.packetSendingElapsedTime;
toByteArrayElapsedTime += rhs.toByteArrayElapsedTime;
jobElapsedTime += rhs.jobElapsedTime;
return *this;
}
};
class AvatarMixerSlave {
public:
using ConstIter = NodeList::const_iterator;
void configure(ConstIter begin, ConstIter end);
void configureBroadcast(ConstIter begin, ConstIter end,
p_high_resolution_clock::time_point lastFrameTimestamp,
float maxKbpsPerNode, float throttlingRatio);
void processIncomingPackets(const SharedNodePointer& node);
void broadcastAvatarData(const SharedNodePointer& node);
void harvestStats(AvatarMixerSlaveStats& stats);
private:
void sendIdentityPacket(const AvatarMixerClientData* nodeData, const SharedNodePointer& destinationNode);
// frame state
ConstIter _begin;
ConstIter _end;
p_high_resolution_clock::time_point _lastFrameTimestamp;
float _maxKbpsPerNode { 0.0f };
float _throttlingRatio { 0.0f };
AvatarMixerSlaveStats _stats;
};
#endif // hifi_AvatarMixerSlave_h
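
Given harvestStats() and operator+= above, a natural way for the mixer to roll up per-slave counters once per stats window looks like this (editor's sketch; the aggregation site is an assumption):

AvatarMixerSlaveStats aggregate;
_slavePool.each([&](AvatarMixerSlave& slave) {
    AvatarMixerSlaveStats stats;
    slave.harvestStats(stats);   // copies this slave's counters and resets them
    aggregate += stats;
});
// aggregate.* then feeds sendStatsPacket()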


@@ -0,0 +1,208 @@
//
// AvatarMixerSlavePool.cpp
// assignment-client/src/avatar
//
// Created by Brad Hefta-Gaub on 2/14/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <algorithm>
#include "AvatarMixerSlavePool.h"
void AvatarMixerSlaveThread::run() {
while (true) {
wait();
// iterate over all available nodes
SharedNodePointer node;
while (try_pop(node)) {
(this->*_function)(node);
}
bool stopping = _stop;
notify(stopping);
if (stopping) {
return;
}
}
}
void AvatarMixerSlaveThread::wait() {
{
Lock lock(_pool._mutex);
_pool._slaveCondition.wait(lock, [&] {
assert(_pool._numStarted <= _pool._numThreads);
return _pool._numStarted != _pool._numThreads;
});
++_pool._numStarted;
}
if (_pool._configure) {
_pool._configure(*this);
}
_function = _pool._function;
}
void AvatarMixerSlaveThread::notify(bool stopping) {
{
Lock lock(_pool._mutex);
assert(_pool._numFinished < _pool._numThreads);
++_pool._numFinished;
if (stopping) {
++_pool._numStopped;
}
}
_pool._poolCondition.notify_one();
}
bool AvatarMixerSlaveThread::try_pop(SharedNodePointer& node) {
return _pool._queue.try_pop(node);
}
#ifdef AVATAR_SINGLE_THREADED
static AvatarMixerSlave slave;
#endif
void AvatarMixerSlavePool::processIncomingPackets(ConstIter begin, ConstIter end) {
_function = &AvatarMixerSlave::processIncomingPackets;
_configure = [&](AvatarMixerSlave& slave) {
slave.configure(begin, end);
};
run(begin, end);
}
void AvatarMixerSlavePool::broadcastAvatarData(ConstIter begin, ConstIter end,
p_high_resolution_clock::time_point lastFrameTimestamp,
float maxKbpsPerNode, float throttlingRatio) {
_function = &AvatarMixerSlave::broadcastAvatarData;
_configure = [&](AvatarMixerSlave& slave) {
slave.configureBroadcast(begin, end, lastFrameTimestamp, maxKbpsPerNode, throttlingRatio);
};
run(begin, end);
}
void AvatarMixerSlavePool::run(ConstIter begin, ConstIter end) {
_begin = begin;
_end = end;
#ifdef AVATAR_SINGLE_THREADED
_configure(slave);
std::for_each(begin, end, [&](const SharedNodePointer& node) {
_function(slave, node);
});
#else
// fill the queue
std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
_queue.emplace(node);
});
{
Lock lock(_mutex);
// run
_numStarted = _numFinished = 0;
_slaveCondition.notify_all();
// wait
_poolCondition.wait(lock, [&] {
assert(_numFinished <= _numThreads);
return _numFinished == _numThreads;
});
assert(_numStarted == _numThreads);
}
assert(_queue.empty());
#endif
}
void AvatarMixerSlavePool::each(std::function<void(AvatarMixerSlave& slave)> functor) {
#ifdef AVATAR_SINGLE_THREADED
functor(slave);
#else
for (auto& slave : _slaves) {
functor(*slave.get());
}
#endif
}
void AvatarMixerSlavePool::setNumThreads(int numThreads) {
// clamp to allowed size
{
int maxThreads = QThread::idealThreadCount();
if (maxThreads == -1) {
// idealThreadCount returns -1 if cores cannot be detected
static const int MAX_THREADS_IF_UNKNOWN = 4;
maxThreads = MAX_THREADS_IF_UNKNOWN;
}
int clampedThreads = std::min(std::max(1, numThreads), maxThreads);
if (clampedThreads != numThreads) {
qWarning("%s: clamped to %d (was %d)", __FUNCTION__, clampedThreads, numThreads);
numThreads = clampedThreads;
}
}
resize(numThreads);
}
void AvatarMixerSlavePool::resize(int numThreads) {
assert(_numThreads == (int)_slaves.size());
#ifdef AVATAR_SINGLE_THREADED
qDebug("%s: running single threaded", __FUNCTION__, numThreads);
#else
qDebug("%s: set %d threads (was %d)", __FUNCTION__, numThreads, _numThreads);
Lock lock(_mutex);
if (numThreads > _numThreads) {
// start new slaves
for (int i = 0; i < numThreads - _numThreads; ++i) {
auto slave = new AvatarMixerSlaveThread(*this);
slave->start();
_slaves.emplace_back(slave);
}
} else if (numThreads < _numThreads) {
auto extraBegin = _slaves.begin() + numThreads;
// mark slaves to stop...
auto slave = extraBegin;
while (slave != _slaves.end()) {
(*slave)->_stop = true;
++slave;
}
// ...cycle them until they do stop...
_numStopped = 0;
while (_numStopped != (_numThreads - numThreads)) {
_numStarted = _numFinished = _numStopped;
_slaveCondition.notify_all();
_poolCondition.wait(lock, [&] {
assert(_numFinished <= _numThreads);
return _numFinished == _numThreads;
});
}
// ...wait for threads to finish...
slave = extraBegin;
while (slave != _slaves.end()) {
QThread* thread = reinterpret_cast<QThread*>(slave->get());
static const int MAX_THREAD_WAIT_TIME = 10;
thread->wait(MAX_THREAD_WAIT_TIME);
++slave;
}
// ...and erase them
_slaves.erase(extraBegin, _slaves.end());
}
_numThreads = _numStarted = _numFinished = numThreads;
assert(_numThreads == (int)_slaves.size());
#endif
}


@@ -0,0 +1,104 @@
//
// AvatarMixerSlavePool.h
// assignment-client/src/avatar
//
// Created by Brad Hefta-Gaub on 2/14/2017.
// Copyright 2017 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AvatarMixerSlavePool_h
#define hifi_AvatarMixerSlavePool_h
#include <condition_variable>
#include <mutex>
#include <vector>
#include <tbb/concurrent_queue.h>
#include <QThread>
#include <NodeList.h>
#include "AvatarMixerSlave.h"
class AvatarMixerSlavePool;
class AvatarMixerSlaveThread : public QThread, public AvatarMixerSlave {
Q_OBJECT
using ConstIter = NodeList::const_iterator;
using Mutex = std::mutex;
using Lock = std::unique_lock<Mutex>;
public:
AvatarMixerSlaveThread(AvatarMixerSlavePool& pool) : _pool(pool) {}
void run() override final;
private:
friend class AvatarMixerSlavePool;
void wait();
void notify(bool stopping);
bool try_pop(SharedNodePointer& node);
AvatarMixerSlavePool& _pool;
void (AvatarMixerSlave::*_function)(const SharedNodePointer& node) { nullptr };
bool _stop { false };
};
// Slave pool for avatar mixers
// AvatarMixerSlavePool is not thread-safe! It should be instantiated and used from a single thread.
class AvatarMixerSlavePool {
using Queue = tbb::concurrent_queue<SharedNodePointer>;
using Mutex = std::mutex;
using Lock = std::unique_lock<Mutex>;
using ConditionVariable = std::condition_variable;
public:
using ConstIter = NodeList::const_iterator;
AvatarMixerSlavePool(int numThreads = QThread::idealThreadCount()) { setNumThreads(numThreads); }
~AvatarMixerSlavePool() { resize(0); }
// Jobs the slave pool can do...
void processIncomingPackets(ConstIter begin, ConstIter end);
void broadcastAvatarData(ConstIter begin, ConstIter end,
p_high_resolution_clock::time_point lastFrameTimestamp, float maxKbpsPerNode, float throttlingRatio);
// iterate over all slaves
void each(std::function<void(AvatarMixerSlave& slave)> functor);
void setNumThreads(int numThreads);
int numThreads() { return _numThreads; }
private:
void run(ConstIter begin, ConstIter end);
void resize(int numThreads);
std::vector<std::unique_ptr<AvatarMixerSlaveThread>> _slaves;
friend void AvatarMixerSlaveThread::wait();
friend void AvatarMixerSlaveThread::notify(bool stopping);
friend bool AvatarMixerSlaveThread::try_pop(SharedNodePointer& node);
// synchronization state
Mutex _mutex;
ConditionVariable _slaveCondition;
ConditionVariable _poolCondition;
void (AvatarMixerSlave::*_function)(const SharedNodePointer& node);
std::function<void(AvatarMixerSlave&)> _configure;
int _numThreads { 0 };
int _numStarted { 0 }; // guarded by _mutex
int _numFinished { 0 }; // guarded by _mutex
int _numStopped { 0 }; // guarded by _mutex
// frame state
Queue _queue;
ConstIter _begin;
ConstIter _end;
};
#endif // hifi_AvatarMixerSlavePool_h
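
A plausible per-frame driver for the pool (editor's sketch: nestedEach is assumed to be the NodeList helper that hands a begin/end iterator pair to a functor, mirroring how the analogous audio mixer slave pool is driven; it is not shown in this diff):

nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
    _slavePool.processIncomingPackets(cbegin, cend);   // receive pass
});
// ... per-frame bookkeeping ...
nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
    _slavePool.broadcastAvatarData(cbegin, cend,       // send pass
        _lastFrameTimestamp, _maxKbpsPerNode, _throttlingRatio);
});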


@@ -14,10 +14,9 @@
#include <GLMHelpers.h>
#include "ScriptableAvatar.h"
QByteArray ScriptableAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) {
QByteArray ScriptableAvatar::toByteArrayStateful(AvatarDataDetail dataDetail) {
_globalPosition = getPosition();
return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut);
return AvatarData::toByteArrayStateful(dataDetail);
}


@@ -28,8 +28,7 @@ public:
Q_INVOKABLE AnimationDetails getAnimationDetails();
virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override;
virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr) override;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail) override;
private slots:


@@ -1299,6 +1299,22 @@
"placeholder": 5.0,
"default": 5.0,
"advanced": true
},
{
"name": "auto_threads",
"label": "Automatically determine thread count",
"type": "checkbox",
"help": "Allow system to determine number of threads (recommended)",
"default": false,
"advanced": true
},
{
"name": "num_threads",
"label": "Number of Threads",
"help": "Threads to spin up for avatar mixing (if not automatically set)",
"placeholder": "1",
"default": "1",
"advanced": true
}
]
}
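
On the mixer side these two keys would plausibly be consumed in AvatarMixer::parseDomainServerSettings (editor's sketch; the key names come from the JSON above, the group-object lookup is an assumption):

bool autoThreads = avatarMixerGroupObject["auto_threads"].toBool();
if (autoThreads) {
    _slavePool.setNumThreads(QThread::idealThreadCount());
} else {
    // "num_threads" defaults to the string "1" above, hence toString().toInt()
    _slavePool.setNumThreads(avatarMixerGroupObject["num_threads"].toString().toInt());
}
// setNumThreads() clamps to [1, idealThreadCount()] either way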


@@ -227,8 +227,7 @@ void MyAvatar::simulateAttachments(float deltaTime) {
// don't update attachments here, do it in harvestResultsFromPhysicsSimulation()
}
QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) {
QByteArray MyAvatar::toByteArrayStateful(AvatarDataDetail dataDetail) {
CameraMode mode = qApp->getCamera()->getMode();
_globalPosition = getPosition();
_globalBoundingBoxDimensions.x = _characterController.getCapsuleRadius();
@@ -239,12 +238,12 @@ QByteArray MyAvatar::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTi
// fake the avatar position that is sent up to the AvatarMixer
glm::vec3 oldPosition = getPosition();
setPosition(getSkeletonPosition());
QByteArray array = AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut);
QByteArray array = AvatarData::toByteArrayStateful(dataDetail);
// copy the correct position back
setPosition(oldPosition);
return array;
}
return AvatarData::toByteArray(dataDetail, lastSentTime, lastSentJointData, distanceAdjust, viewerPosition, sentJointDataOut);
return AvatarData::toByteArrayStateful(dataDetail);
}
void MyAvatar::centerBody() {


@@ -338,8 +338,7 @@ private:
glm::quat getWorldBodyOrientation() const;
virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr) override;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail) override;
void simulate(float deltaTime);
void updateFromTrackers(float deltaTime);


@@ -120,10 +120,6 @@ void AvatarData::nextAttitude(glm::vec3 position, glm::quat orientation) {
updateAttitude();
}
float AvatarData::getTargetScale() const {
return _targetScale;
}
void AvatarData::setTargetScale(float targetScale) {
auto newValue = glm::clamp(targetScale, MIN_AVATAR_SCALE, MAX_AVATAR_SCALE);
if (_targetScale != newValue) {
@@ -141,10 +137,10 @@ void AvatarData::setHandPosition(const glm::vec3& handPosition) {
_handPosition = glm::inverse(getOrientation()) * (handPosition - getPosition());
}
void AvatarData::lazyInitHeadData() {
void AvatarData::lazyInitHeadData() const {
// lazily allocate memory for HeadData in case we're not an Avatar instance
if (!_headData) {
_headData = new HeadData(this);
_headData = new HeadData(const_cast<AvatarData*>(this));
}
if (_forceFaceTrackerConnected) {
_headData->_isFaceTrackerConnected = true;
@@ -152,39 +148,7 @@ void AvatarData::lazyInitHeadData() {
}
bool AvatarData::avatarBoundingBoxChangedSince(quint64 time) {
return _avatarBoundingBoxChanged >= time;
}
bool AvatarData::avatarScaleChangedSince(quint64 time) {
return _avatarScaleChanged >= time;
}
bool AvatarData::lookAtPositionChangedSince(quint64 time) {
return _headData->lookAtPositionChangedSince(time);
}
bool AvatarData::audioLoudnessChangedSince(quint64 time) {
return _headData->audioLoudnessChangedSince(time);
}
bool AvatarData::sensorToWorldMatrixChangedSince(quint64 time) {
return _sensorToWorldMatrixChanged >= time;
}
bool AvatarData::additionalFlagsChangedSince(quint64 time) {
return _additionalFlagsChanged >= time;
}
bool AvatarData::parentInfoChangedSince(quint64 time) {
return _parentChanged >= time;
}
bool AvatarData::faceTrackerInfoChangedSince(quint64 time) {
return true; // FIXME!
}
float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) {
float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) const {
auto distance = glm::distance(_globalPosition, viewerPosition);
float result = ROTATION_CHANGE_179D; // assume worst
if (distance < AVATAR_DISTANCE_LEVEL_1) {
@@ -199,20 +163,24 @@ float AvatarData::getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) {
return result;
}
float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) {
float AvatarData::getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) const {
return AVATAR_MIN_TRANSLATION; // Eventually make this distance sensitive as well
}
QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust, glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut) {
// we want to track outbound data in this case...
QByteArray AvatarData::toByteArrayStateful(AvatarDataDetail dataDetail) {
AvatarDataPacket::HasFlags hasFlagsOut;
auto lastSentTime = _lastToByteArray;
_lastToByteArray = usecTimestampNow();
return AvatarData::toByteArray(dataDetail, lastSentTime, getLastSentJointData(),
hasFlagsOut, false, false, glm::vec3(0), nullptr,
&_outboundDataRate);
}
// if no timestamp was included, then assume the avatarData is single instance
// and is tracking its own last encoding time.
if (lastSentTime == 0) {
lastSentTime = _lastToByteArray;
_lastToByteArray = usecTimestampNow();
}
QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
AvatarDataPacket::HasFlags& hasFlagsOut, bool dropFaceTracking, bool distanceAdjust,
glm::vec3 viewerPosition, QVector<JointData>* sentJointDataOut, AvatarDataRate* outboundDataRateOut) const {
bool cullSmallChanges = (dataDetail == CullSmallData);
bool sendAll = (dataDetail == SendAllData);
@@ -259,26 +227,26 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
// separately
bool hasParentInfo = sendAll || parentInfoChangedSince(lastSentTime);
bool hasAvatarLocalPosition = hasParent() && (sendAll ||
tranlationChangedSince(lastSentTime) ||
parentInfoChangedSince(lastSentTime));
tranlationChangedSince(lastSentTime) ||
parentInfoChangedSince(lastSentTime));
bool hasFaceTrackerInfo = hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime));
bool hasFaceTrackerInfo = !dropFaceTracking && hasFaceTracker() && (sendAll || faceTrackerInfoChangedSince(lastSentTime));
bool hasJointData = sendAll || !sendMinimum;
// Leading flags, to indicate how much data is actually included in the packet...
AvatarDataPacket::HasFlags packetStateFlags =
(hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0)
| (hasAvatarBoundingBox ? AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX : 0)
| (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0)
| (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0)
| (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0)
| (hasAudioLoudness ? AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS : 0)
| (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0)
| (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0)
| (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0)
| (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0)
| (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0)
| (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0);
(hasAvatarGlobalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_GLOBAL_POSITION : 0)
| (hasAvatarBoundingBox ? AvatarDataPacket::PACKET_HAS_AVATAR_BOUNDING_BOX : 0)
| (hasAvatarOrientation ? AvatarDataPacket::PACKET_HAS_AVATAR_ORIENTATION : 0)
| (hasAvatarScale ? AvatarDataPacket::PACKET_HAS_AVATAR_SCALE : 0)
| (hasLookAtPosition ? AvatarDataPacket::PACKET_HAS_LOOK_AT_POSITION : 0)
| (hasAudioLoudness ? AvatarDataPacket::PACKET_HAS_AUDIO_LOUDNESS : 0)
| (hasSensorToWorldMatrix ? AvatarDataPacket::PACKET_HAS_SENSOR_TO_WORLD_MATRIX : 0)
| (hasAdditionalFlags ? AvatarDataPacket::PACKET_HAS_ADDITIONAL_FLAGS : 0)
| (hasParentInfo ? AvatarDataPacket::PACKET_HAS_PARENT_INFO : 0)
| (hasAvatarLocalPosition ? AvatarDataPacket::PACKET_HAS_AVATAR_LOCAL_POSITION : 0)
| (hasFaceTrackerInfo ? AvatarDataPacket::PACKET_HAS_FACE_TRACKER_INFO : 0)
| (hasJointData ? AvatarDataPacket::PACKET_HAS_JOINT_DATA : 0);
memcpy(destinationBuffer, &packetStateFlags, sizeof(packetStateFlags));
destinationBuffer += sizeof(packetStateFlags);
@@ -293,7 +261,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
int numBytes = destinationBuffer - startSection;
_globalPositionRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->globalPositionRate.increment(numBytes);
}
}
if (hasAvatarBoundingBox) {
@@ -311,7 +281,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::AvatarBoundingBox);
int numBytes = destinationBuffer - startSection;
_avatarBoundingBoxRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->avatarBoundingBoxRate.increment(numBytes);
}
}
if (hasAvatarOrientation) {
@@ -320,7 +292,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += packOrientationQuatToSixBytes(destinationBuffer, localOrientation);
int numBytes = destinationBuffer - startSection;
_avatarOrientationRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->avatarOrientationRate.increment(numBytes);
}
}
if (hasAvatarScale) {
@@ -331,7 +305,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::AvatarScale);
int numBytes = destinationBuffer - startSection;
_avatarScaleRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->avatarScaleRate.increment(numBytes);
}
}
if (hasLookAtPosition) {
@@ -344,7 +320,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::LookAtPosition);
int numBytes = destinationBuffer - startSection;
_lookAtPositionRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->lookAtPositionRate.increment(numBytes);
}
}
if (hasAudioLoudness) {
@@ -354,7 +332,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::AudioLoudness);
int numBytes = destinationBuffer - startSection;
_audioLoudnessRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->audioLoudnessRate.increment(numBytes);
}
}
if (hasSensorToWorldMatrix) {
@@ -370,7 +350,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::SensorToWorldMatrix);
int numBytes = destinationBuffer - startSection;
_sensorToWorldRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->sensorToWorldRate.increment(numBytes);
}
}
if (hasAdditionalFlags) {
@@ -403,7 +385,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::AdditionalFlags);
int numBytes = destinationBuffer - startSection;
_additionalFlagsRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->additionalFlagsRate.increment(numBytes);
}
}
if (hasParentInfo) {
@@ -415,7 +399,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::ParentInfo);
int numBytes = destinationBuffer - startSection;
_parentInfoRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->parentInfoRate.increment(numBytes);
}
}
if (hasAvatarLocalPosition) {
@@ -428,7 +414,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += sizeof(AvatarDataPacket::AvatarLocalPosition);
int numBytes = destinationBuffer - startSection;
_localPositionRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->localPositionRate.increment(numBytes);
}
}
// If it is connected, pack up the data
@@ -448,7 +436,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
destinationBuffer += _headData->_blendshapeCoefficients.size() * sizeof(float);
int numBytes = destinationBuffer - startSection;
_faceTrackerRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->faceTrackerRate.increment(numBytes);
}
}
// If it is connected, pack up the data
@@ -540,9 +530,9 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
glm::distance(data.translation, lastSentJointData[i].translation) > minTranslation) {
if (data.translationSet) {
validity |= (1 << validityBit);
#ifdef WANT_DEBUG
#ifdef WANT_DEBUG
translationSentCount++;
#endif
#endif
maxTranslationDimension = glm::max(fabsf(data.translation.x), maxTranslationDimension);
maxTranslationDimension = glm::max(fabsf(data.translation.y), maxTranslationDimension);
maxTranslationDimension = glm::max(fabsf(data.translation.z), maxTranslationDimension);
@@ -592,24 +582,25 @@ QByteArray AvatarData::toByteArray(AvatarDataDetail dataDetail, quint64 lastSent
#ifdef WANT_DEBUG
if (sendAll) {
qCDebug(avatars) << "AvatarData::toByteArray" << cullSmallChanges << sendAll
<< "rotations:" << rotationSentCount << "translations:" << translationSentCount
<< "largest:" << maxTranslationDimension
<< "size:"
<< (beforeRotations - startPosition) << "+"
<< (beforeTranslations - beforeRotations) << "+"
<< (destinationBuffer - beforeTranslations) << "="
<< (destinationBuffer - startPosition);
<< "rotations:" << rotationSentCount << "translations:" << translationSentCount
<< "largest:" << maxTranslationDimension
<< "size:"
<< (beforeRotations - startPosition) << "+"
<< (beforeTranslations - beforeRotations) << "+"
<< (destinationBuffer - beforeTranslations) << "="
<< (destinationBuffer - startPosition);
}
#endif
int numBytes = destinationBuffer - startSection;
_jointDataRateOutbound.increment(numBytes);
if (outboundDataRateOut) {
outboundDataRateOut->jointDataRate.increment(numBytes);
}
}
int avatarDataSize = destinationBuffer - startPosition;
return avatarDataByteArray.left(avatarDataSize);
}
// NOTE: This is never used in a "distanceAdjust" mode, so it's ok that it doesn't use a variable minimum rotation/translation
void AvatarData::doneEncoding(bool cullSmallChanges) {
// The server has finished sending this version of the joint-data to other nodes. Update _lastSentJointData.
@@ -1089,29 +1080,29 @@ float AvatarData::getDataRate(const QString& rateName) const {
} else if (rateName == "jointData") {
return _jointDataRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "globalPositionOutbound") {
return _globalPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.globalPositionRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "localPositionOutbound") {
return _localPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.localPositionRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "avatarBoundingBoxOutbound") {
return _avatarBoundingBoxRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.avatarBoundingBoxRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "avatarOrientationOutbound") {
return _avatarOrientationRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.avatarOrientationRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "avatarScaleOutbound") {
return _avatarScaleRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.avatarScaleRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "lookAtPositionOutbound") {
return _lookAtPositionRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.lookAtPositionRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "audioLoudnessOutbound") {
return _audioLoudnessRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.audioLoudnessRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "sensorToWorkMatrixOutbound") {
return _sensorToWorldRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.sensorToWorldRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "additionalFlagsOutbound") {
return _additionalFlagsRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.additionalFlagsRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "parentInfoOutbound") {
return _parentInfoRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.parentInfoRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "faceTrackerOutbound") {
return _faceTrackerRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.faceTrackerRate.rate() / BYTES_PER_KILOBIT;
} else if (rateName == "jointDataOutbound") {
return _jointDataRateOutbound.rate() / BYTES_PER_KILOBIT;
return _outboundDataRate.jointDataRate.rate() / BYTES_PER_KILOBIT;
}
return 0.0f;
}
@@ -1445,7 +1436,7 @@ void AvatarData::parseAvatarIdentityPacket(const QByteArray& data, Identity& ide
}
static const QUrl emptyURL("");
const QUrl& AvatarData::cannonicalSkeletonModelURL(const QUrl& emptyURL) {
QUrl AvatarData::cannonicalSkeletonModelURL(const QUrl& emptyURL) const {
// We don't put file urls on the wire, but instead convert to empty.
return _skeletonModelURL.scheme() == "file" ? emptyURL : _skeletonModelURL;
}
@@ -1483,7 +1474,7 @@ void AvatarData::processAvatarIdentity(const Identity& identity, bool& identityC
}
}
QByteArray AvatarData::identityByteArray() {
QByteArray AvatarData::identityByteArray() const {
QByteArray identityData;
QDataStream identityStream(&identityData, QIODevice::Append);
const QUrl& urlToSend = cannonicalSkeletonModelURL(emptyURL);
@@ -1646,13 +1637,7 @@ void AvatarData::sendAvatarDataPacket() {
bool cullSmallData = (randFloat() < AVATAR_SEND_FULL_UPDATE_RATIO);
auto dataDetail = cullSmallData ? SendAllData : CullSmallData;
QVector<JointData> lastSentJointData;
{
QReadLocker readLock(&_jointDataLock);
_lastSentJointData.resize(_jointData.size());
lastSentJointData = _lastSentJointData;
}
QByteArray avatarByteArray = toByteArray(dataDetail, 0, lastSentJointData);
QByteArray avatarByteArray = toByteArrayStateful(dataDetail);
doneEncoding(cullSmallData);
static AvatarDataSequenceNumber sequenceNumber = 0;
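
The net effect of the AvatarData changes above is a split into two encoding entry points, roughly as follows (editor's sketch; the per-receiver variable names are hypothetical):

// stateful: the avatar tracks its own last-send time and joint cache
// (used by Agent, MyAvatar, and sendAvatarDataPacket() above)
QByteArray selfBytes = avatar.toByteArrayStateful(AvatarData::CullSmallData);

// stateless/const: the caller supplies per-receiver state, so one shared
// AvatarData can be encoded differently for every receiver (AvatarMixerSlave)
AvatarDataPacket::HasFlags hasFlagsOut;
QByteArray bytes = otherAvatar->toByteArray(AvatarData::CullSmallData,
    lastEncodeTimeForReceiver, lastJointsSentToReceiver,
    hasFlagsOut, false /* dropFaceTracking */, true /* distanceAdjust */,
    viewerPosition, &lastJointsSentToReceiver);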


@@ -288,6 +288,23 @@ class AttachmentData;
class Transform;
using TransformPointer = std::shared_ptr<Transform>;
class AvatarDataRate {
public:
RateCounter<> globalPositionRate;
RateCounter<> localPositionRate;
RateCounter<> avatarBoundingBoxRate;
RateCounter<> avatarOrientationRate;
RateCounter<> avatarScaleRate;
RateCounter<> lookAtPositionRate;
RateCounter<> audioLoudnessRate;
RateCounter<> sensorToWorldRate;
RateCounter<> additionalFlagsRate;
RateCounter<> parentInfoRate;
RateCounter<> faceTrackerRate;
RateCounter<> jointDataRate;
};
class AvatarData : public QObject, public SpatiallyNestable {
Q_OBJECT
@@ -351,8 +368,11 @@ public:
SendAllData
} AvatarDataDetail;
virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail);
virtual QByteArray toByteArray(AvatarDataDetail dataDetail, quint64 lastSentTime, const QVector<JointData>& lastSentJointData,
bool distanceAdjust = false, glm::vec3 viewerPosition = glm::vec3(0), QVector<JointData>* sentJointDataOut = nullptr);
AvatarDataPacket::HasFlags& hasFlagsOut, bool dropFaceTracking, bool distanceAdjust, glm::vec3 viewerPosition,
QVector<JointData>* sentJointDataOut, AvatarDataRate* outboundDataRateOut = nullptr) const;
virtual void doneEncoding(bool cullSmallChanges);
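The long-form toByteArray now exposes everything a per-viewer encoder needs: a HasFlags out-parameter reporting which sections were packed, a switch to drop face-tracking data, distance-based culling keyed on the viewer's position, and the optional AvatarDataRate accounting hook. A hedged sketch of a per-viewer call; the variable names and argument choices are illustrative, not lifted from AvatarMixer:

AvatarDataPacket::HasFlags hasFlags;           // out: sections actually packed
QVector<JointData> sentJoints;                 // out: joints actually sent
QByteArray payload = otherAvatar->toByteArray(
    AvatarData::CullSmallData,                 // detail level
    lastEncodeTime,                            // only resend what changed since then
    lastSentJoints,                            // baseline for joint culling
    hasFlags,
    false,                                     // dropFaceTracking
    true,                                      // distanceAdjust: cull harder when far
    viewerPosition,                            // the receiving avatar's position
    &sentJoints,
    &outboundRates);                           // optional AvatarDataRate accounting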
@@ -380,7 +400,7 @@ public:
void nextAttitude(glm::vec3 position, glm::quat orientation); // Can be safely called at any time.
virtual void updateAttitude() {} // Tell skeleton mesh about changes
glm::quat getHeadOrientation() {
glm::quat getHeadOrientation() const {
lazyInitHeadData();
return _headData->getOrientation();
}
@@ -419,7 +439,6 @@ public:
void setAudioAverageLoudness(float value) { _headData->setAudioAverageLoudness(value); }
// Scale
float getTargetScale() const;
virtual void setTargetScale(float targetScale);
float getDomainLimitedScale() const { return glm::clamp(_targetScale, _domainMinimumScale, _domainMaximumScale); }
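getDomainLimitedScale() clamps the avatar's requested scale into whatever range the domain permits. A quick worked example with hypothetical limits:

#include <glm/glm.hpp>

float targetScale = 3.0f;                              // what the user asked for
float limited = glm::clamp(targetScale, 0.25f, 2.0f);  // == 2.0f on a [0.25, 2.0] domain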
@@ -494,7 +513,7 @@ public:
// displayNameChanged returns true if displayName has changed, false otherwise.
void processAvatarIdentity(const Identity& identity, bool& identityChanged, bool& displayNameChanged);
QByteArray identityByteArray();
QByteArray identityByteArray() const;
const QUrl& getSkeletonModelURL() const { return _skeletonModelURL; }
const QString& getDisplayName() const { return _displayName; }
@@ -534,8 +553,8 @@ public:
QJsonObject toJson() const;
void fromJson(const QJsonObject& json, bool useFrameSkeleton = true);
glm::vec3 getClientGlobalPosition() { return _globalPosition; }
glm::vec3 getGlobalBoundingBoxCorner() { return _globalPosition + _globalBoundingBoxOffset - _globalBoundingBoxDimensions; }
glm::vec3 getClientGlobalPosition() const { return _globalPosition; }
glm::vec3 getGlobalBoundingBoxCorner() const { return _globalPosition + _globalBoundingBoxOffset - _globalBoundingBoxDimensions; }
Q_INVOKABLE AvatarEntityMap getAvatarEntityData() const;
Q_INVOKABLE void setAvatarEntityData(const AvatarEntityMap& avatarEntityData);
@@ -550,7 +569,7 @@ public:
Q_INVOKABLE float getDataRate(const QString& rateName = QString("")) const;
Q_INVOKABLE float getUpdateRate(const QString& rateName = QString("")) const;
int getJointCount() { return _jointData.size(); }
int getJointCount() const { return _jointData.size(); }
QVector<JointData> getLastSentJointData() {
QReadLocker readLock(&_jointDataLock);
@@ -571,28 +590,27 @@ public slots:
virtual bool setAbsoluteJointRotationInObjectFrame(int index, const glm::quat& rotation) override { return false; }
virtual bool setAbsoluteJointTranslationInObjectFrame(int index, const glm::vec3& translation) override { return false; }
float getTargetScale() { return _targetScale; }
float getTargetScale() const { return _targetScale; } // why is this a slot?
void resetLastSent() { _lastToByteArray = 0; }
protected:
void lazyInitHeadData();
void lazyInitHeadData() const;
float getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition);
float getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition);
float getDistanceBasedMinRotationDOT(glm::vec3 viewerPosition) const;
float getDistanceBasedMinTranslationDistance(glm::vec3 viewerPosition) const;
bool avatarBoundingBoxChangedSince(quint64 time);
bool avatarScaleChangedSince(quint64 time);
bool lookAtPositionChangedSince(quint64 time);
bool audioLoudnessChangedSince(quint64 time);
bool sensorToWorldMatrixChangedSince(quint64 time);
bool additionalFlagsChangedSince(quint64 time);
bool avatarBoundingBoxChangedSince(quint64 time) const { return _avatarBoundingBoxChanged >= time; }
bool avatarScaleChangedSince(quint64 time) const { return _avatarScaleChanged >= time; }
bool lookAtPositionChangedSince(quint64 time) const { return _headData->lookAtPositionChangedSince(time); }
bool audioLoudnessChangedSince(quint64 time) const { return _headData->audioLoudnessChangedSince(time); }
bool sensorToWorldMatrixChangedSince(quint64 time) const { return _sensorToWorldMatrixChanged >= time; }
bool additionalFlagsChangedSince(quint64 time) const { return _additionalFlagsChanged >= time; }
bool parentInfoChangedSince(quint64 time) const { return _parentChanged >= time; }
bool faceTrackerInfoChangedSince(quint64 time) const { return true; } // FIXME
bool hasParent() { return !getParentID().isNull(); }
bool parentInfoChangedSince(quint64 time);
bool hasFaceTracker() { return _headData ? _headData->_isFaceTrackerConnected : false; }
bool faceTrackerInfoChangedSince(quint64 time);
bool hasParent() const { return !getParentID().isNull(); }
bool hasFaceTracker() const { return _headData ? _headData->_isFaceTrackerConnected : false; }
glm::vec3 _handPosition;
virtual const QString& getSessionDisplayNameForTransport() const { return _sessionDisplayName; }
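Inlining the *ChangedSince predicates makes the delta-send pattern explicit: every mutator stamps a change time, and the encoder compares that stamp against the last send time to decide whether a section goes back on the wire. A minimal standalone illustration of the pattern with simplified types, not the hifi members:

#include <cstdint>

struct Section {
    uint64_t changedAt { 0 };
    void mutate(uint64_t nowUsecs) { changedAt = nowUsecs; }       // stamp on change
    bool changedSince(uint64_t t) const { return changedAt >= t; } // encoder's test
};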
@@ -616,7 +634,7 @@ protected:
bool _forceFaceTrackerConnected;
bool _hasNewJointData { true }; // set in AvatarData, cleared in Avatar
HeadData* _headData { nullptr };
mutable HeadData* _headData { nullptr };
QUrl _skeletonModelURL;
bool _firstSkeletonCheck { true };
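_headData turns mutable because const accessors such as getHeadOrientation() const now call lazyInitHeadData() const, which must be able to allocate the head data on first use. A hedged sketch of that lazy initializer, assuming HeadData is constructed from its owning avatar; not guaranteed to match the source:

void AvatarData::lazyInitHeadData() const {
    if (!_headData) {
        // mutable lets a const method create the head data on demand
        _headData = new HeadData(const_cast<AvatarData*>(this));
    }
}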
@@ -624,7 +642,7 @@ protected:
QVector<AttachmentData> _attachmentData;
QString _displayName;
QString _sessionDisplayName { };
const QUrl& cannonicalSkeletonModelURL(const QUrl& empty);
QUrl cannonicalSkeletonModelURL(const QUrl& empty) const;
float _displayNameTargetAlpha;
float _displayNameAlpha;
@@ -695,18 +713,7 @@ protected:
RateCounter<> _jointDataUpdateRate;
// Some rate data for outgoing data
RateCounter<> _globalPositionRateOutbound;
RateCounter<> _localPositionRateOutbound;
RateCounter<> _avatarBoundingBoxRateOutbound;
RateCounter<> _avatarOrientationRateOutbound;
RateCounter<> _avatarScaleRateOutbound;
RateCounter<> _lookAtPositionRateOutbound;
RateCounter<> _audioLoudnessRateOutbound;
RateCounter<> _sensorToWorldRateOutbound;
RateCounter<> _additionalFlagsRateOutbound;
RateCounter<> _parentInfoRateOutbound;
RateCounter<> _faceTrackerRateOutbound;
RateCounter<> _jointDataRateOutbound;
AvatarDataRate _outboundDataRate;
glm::vec3 _globalBoundingBoxDimensions;
glm::vec3 _globalBoundingBoxOffset;

View file

@@ -34,6 +34,7 @@
#include <tbb/concurrent_unordered_map.h>
#include <DependencyManager.h>
#include <SharedUtil.h>
#include "DomainHandler.h"
#include "Node.h"
@@ -182,15 +183,33 @@ public:
// This allows multiple threads (i.e. a thread pool) to share a lock
// without deadlocking when a dying node attempts to acquire a write lock
template<typename NestedNodeLambda>
void nestedEach(NestedNodeLambda functor) {
QReadLocker readLock(&_nodeMutex);
void nestedEach(NestedNodeLambda functor,
int* lockWaitOut = nullptr,
int* nodeTransformOut = nullptr,
int* functorOut = nullptr) {
auto start = usecTimestampNow();
{
QReadLocker readLock(&_nodeMutex);
auto endLock = usecTimestampNow();
if (lockWaitOut) {
*lockWaitOut = (endLock - start);
}
std::vector<SharedNodePointer> nodes(_nodeHash.size());
std::transform(_nodeHash.cbegin(), _nodeHash.cend(), nodes.begin(), [](const NodeHash::value_type& it) {
return it.second;
});
std::vector<SharedNodePointer> nodes(_nodeHash.size());
std::transform(_nodeHash.cbegin(), _nodeHash.cend(), nodes.begin(), [](const NodeHash::value_type& it) {
return it.second;
});
auto endTransform = usecTimestampNow();
if (nodeTransformOut) {
*nodeTransformOut = (endTransform - endLock);
}
functor(nodes.cbegin(), nodes.cend());
functor(nodes.cbegin(), nodes.cend());
auto endFunctor = usecTimestampNow();
if (functorOut) {
*functorOut = (endFunctor - endTransform);
}
}
}
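The reworked nestedEach keeps its old job, snapshotting the node hash under the read lock and handing the snapshot to the functor, but now reports where the microseconds went through three optional out-parameters. A hedged usage sketch; the caller-side names are illustrative:

int lockWaitUsecs = 0;    // time blocked acquiring _nodeMutex
int snapshotUsecs = 0;    // time copying nodes out of the hash
int functorUsecs = 0;     // time spent inside the lambda
nodeList->nestedEach([&](auto beginIt, auto endIt) {
    for (auto it = beginIt; it != endIt; ++it) {
        const SharedNodePointer& node = *it;
        // ... per-node work ...
    }
}, &lockWaitUsecs, &snapshotUsecs, &functorUsecs);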
template<typename NodeLambda>

View file

@@ -179,9 +179,9 @@ public:
const glm::vec3& localVelocity,
const glm::vec3& localAngularVelocity);
bool scaleChangedSince(quint64 time) { return _scaleChanged > time; }
bool tranlationChangedSince(quint64 time) { return _translationChanged > time; }
bool rotationChangedSince(quint64 time) { return _rotationChanged > time; }
bool scaleChangedSince(quint64 time) const { return _scaleChanged > time; }
bool tranlationChangedSince(quint64 time) const { return _translationChanged > time; }
bool rotationChangedSince(quint64 time) const { return _rotationChanged > time; }
protected:
const NestableType _nestableType; // EntityItem or an AvatarData