Mirror of https://github.com/overte-org/overte.git
Merge pull request #4476 from jherico/avatar
Working on tuning the avatar updates to take into account available per-node bandwidth as well as distance
Commit 451aedbb3c: 7 changed files with 186 additions and 79 deletions
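
The heart of the change is the per-avatar send decision in AvatarMixer::broadcastAvatarData(): updates are gated by the global performance throttling ratio and then by a probability that falls off with distance, while per-node outbound bandwidth is now recorded for later use (the diff's own TODO notes it is not yet part of the decision). Below is a minimal standalone sketch of that decision, not the mixer's actual code; randFloat() is reimplemented here for self-containment and the function name is invented for illustration.

#include <cstdlib>

// Stand-in for the engine's randFloat() helper.
static float randFloat() { return rand() / static_cast<float>(RAND_MAX); }

// Hypothetical helper mirroring the checks made in broadcastAvatarData().
bool shouldSendAvatarUpdate(float distanceToAvatar, float performanceThrottlingRatio) {
    // Every update is sent inside this range; at twice this distance,
    // roughly half of the updates are sent.
    const float FULL_RATE_DISTANCE = 2.0f;

    // Global throttle: a ratio of 0.25 drops about a quarter of the candidates.
    if (!(performanceThrottlingRatio == 0.0f
          || randFloat() < (1.0f - performanceThrottlingRatio))) {
        return false;
    }

    // Distance falloff.
    return distanceToAvatar == 0.0f || randFloat() < FULL_RATE_DISTANCE / distanceToAvatar;
}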

@@ -20,9 +20,9 @@
#include <PacketHeaders.h>
#include <SharedUtil.h>
#include <UUID.h>
#include <TryLocker.h>

#include "AvatarMixerClientData.h"

#include "AvatarMixer.h"

const QString AVATAR_MIXER_LOGGING_NAME = "avatar-mixer";

@@ -119,12 +119,25 @@ void AvatarMixer::broadcastAvatarData() {
    auto nodeList = DependencyManager::get<NodeList>();

    AvatarMixerClientData* nodeData = NULL;
    AvatarMixerClientData* otherNodeData = NULL;

    nodeList->eachNode([&](const SharedNodePointer& node) {
        if (node->getLinkedData() && node->getType() == NodeType::Agent && node->getActiveSocket()
            && (nodeData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData()))->getMutex().tryLock()) {
    nodeList->eachMatchingNode(
        [&](const SharedNodePointer& node)->bool {
            if (!node->getLinkedData()) {
                return false;
            }
            if (node->getType() != NodeType::Agent) {
                return false;
            }
            if (!node->getActiveSocket()) {
                return false;
            }
            return true;
        },
        [&](const SharedNodePointer& node) {
            AvatarMixerClientData* nodeData = reinterpret_cast<AvatarMixerClientData*>(node->getLinkedData());
            MutexTryLocker lock(nodeData->getMutex());
            if (!lock.isLocked()) {
                return;
            }
            ++_sumListeners;

            // reset packet pointers for this node

@@ -132,83 +145,97 @@ void AvatarMixer::broadcastAvatarData() {
            AvatarData& avatar = nodeData->getAvatar();
            glm::vec3 myPosition = avatar.getPosition();
            // TODO use this along with the distance in the calculation of whether to send an update
            // about a given otherNode to this node
            // FIXME does this mean we should sort the other nodes by distance before iterating
            // over them?
            float outputBandwidth = node->getOutboundBandwidth();

            // this is an AGENT we have received head data from
            // send back a packet with other active node data to this node
            nodeList->eachNode([&](const SharedNodePointer& otherNode) {
                if (otherNode->getLinkedData() && otherNode->getUUID() != node->getUUID()
                    && (otherNodeData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData()))->getMutex().tryLock()) {

                    AvatarMixerClientData* otherNodeData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
            nodeList->eachMatchingNode(
                [&](const SharedNodePointer& otherNode)->bool {
                    if (!otherNode->getLinkedData()) {
                        return false;
                    }
                    if (otherNode->getUUID() == node->getUUID()) {
                        return false;
                    }

                    // Check throttling value
                    if (!(_performanceThrottlingRatio == 0 || randFloat() < (1.0f - _performanceThrottlingRatio))) {
                        return false;
                    }
                    return true;
                },
                [&](const SharedNodePointer& otherNode) {
                    AvatarMixerClientData* otherNodeData = reinterpret_cast<AvatarMixerClientData*>(otherNode->getLinkedData());
                    MutexTryLocker lock(otherNodeData->getMutex());
                    if (!lock.isLocked()) {
                        return;
                    }
                    AvatarData& otherAvatar = otherNodeData->getAvatar();
                    glm::vec3 otherPosition = otherAvatar.getPosition();

                    float distanceToAvatar = glm::length(myPosition - otherPosition);
                    // Decide whether to send this avatar's data based on its distance from us
                    // The full rate distance is the distance at which EVERY update will be sent for this avatar
                    // at a distance of twice the full rate distance, there will be a 50% chance of sending this avatar's update
                    const float FULL_RATE_DISTANCE = 2.0f;

                    // Decide whether to send this avatar's data based on its distance from us
                    if ((_performanceThrottlingRatio == 0 || randFloat() < (1.0f - _performanceThrottlingRatio))
                        && (distanceToAvatar == 0.0f || randFloat() < FULL_RATE_DISTANCE / distanceToAvatar)) {
                        QByteArray avatarByteArray;
                        avatarByteArray.append(otherNode->getUUID().toRfc4122());
                        avatarByteArray.append(otherAvatar.toByteArray());

                        if (avatarByteArray.size() + mixedAvatarByteArray.size() > MAX_PACKET_SIZE) {
                            nodeList->writeDatagram(mixedAvatarByteArray, node);

                            // reset the packet
                            mixedAvatarByteArray.resize(numPacketHeaderBytes);
                        }

                        // copy the avatar into the mixedAvatarByteArray packet
                        mixedAvatarByteArray.append(avatarByteArray);

                        // if the receiving avatar has just connected make sure we send out the mesh and billboard
                        // for this avatar (assuming they exist)
                        bool forceSend = !nodeData->checkAndSetHasReceivedFirstPackets();

                        // we will also force a send of billboard or identity packet
                        // if either has changed in the last frame

                        if (otherNodeData->getBillboardChangeTimestamp() > 0
                            && (forceSend
                                || otherNodeData->getBillboardChangeTimestamp() > _lastFrameTimestamp
                                || randFloat() < BILLBOARD_AND_IDENTITY_SEND_PROBABILITY)) {
                            QByteArray billboardPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarBillboard);
                            billboardPacket.append(otherNode->getUUID().toRfc4122());
                            billboardPacket.append(otherNodeData->getAvatar().getBillboard());
                            nodeList->writeDatagram(billboardPacket, node);

                            ++_sumBillboardPackets;
                        }

                        if (otherNodeData->getIdentityChangeTimestamp() > 0
                            && (forceSend
                                || otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp
                                || randFloat() < BILLBOARD_AND_IDENTITY_SEND_PROBABILITY)) {

                            QByteArray identityPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarIdentity);

                            QByteArray individualData = otherNodeData->getAvatar().identityByteArray();
                            individualData.replace(0, NUM_BYTES_RFC4122_UUID, otherNode->getUUID().toRfc4122());
                            identityPacket.append(individualData);

                            nodeList->writeDatagram(identityPacket, node);

                            ++_sumIdentityPackets;
                        }
                    glm::vec3 otherPosition = otherAvatar.getPosition();
                    float distanceToAvatar = glm::length(myPosition - otherPosition);

                    if (!(distanceToAvatar == 0.0f || randFloat() < FULL_RATE_DISTANCE / distanceToAvatar)) {
                        return;
                    }

                    QByteArray avatarByteArray;
                    avatarByteArray.append(otherNode->getUUID().toRfc4122());
                    avatarByteArray.append(otherAvatar.toByteArray());

                    if (avatarByteArray.size() + mixedAvatarByteArray.size() > MAX_PACKET_SIZE) {
                        nodeList->writeDatagram(mixedAvatarByteArray, node);

                        // reset the packet
                        mixedAvatarByteArray.resize(numPacketHeaderBytes);
                    }

                    // copy the avatar into the mixedAvatarByteArray packet
                    mixedAvatarByteArray.append(avatarByteArray);

                    // if the receiving avatar has just connected make sure we send out the mesh and billboard
                    // for this avatar (assuming they exist)
                    bool forceSend = !nodeData->checkAndSetHasReceivedFirstPackets();

                    // we will also force a send of billboard or identity packet
                    // if either has changed in the last frame

                    if (otherNodeData->getBillboardChangeTimestamp() > 0
                        && (forceSend
                            || otherNodeData->getBillboardChangeTimestamp() > _lastFrameTimestamp
                            || randFloat() < BILLBOARD_AND_IDENTITY_SEND_PROBABILITY)) {
                        QByteArray billboardPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarBillboard);
                        billboardPacket.append(otherNode->getUUID().toRfc4122());
                        billboardPacket.append(otherNodeData->getAvatar().getBillboard());
                        nodeList->writeDatagram(billboardPacket, node);

                        ++_sumBillboardPackets;
                    }

                    if (otherNodeData->getIdentityChangeTimestamp() > 0
                        && (forceSend
                            || otherNodeData->getIdentityChangeTimestamp() > _lastFrameTimestamp
                            || randFloat() < BILLBOARD_AND_IDENTITY_SEND_PROBABILITY)) {

                        QByteArray identityPacket = byteArrayWithPopulatedHeader(PacketTypeAvatarIdentity);

                        QByteArray individualData = otherNodeData->getAvatar().identityByteArray();
                        individualData.replace(0, NUM_BYTES_RFC4122_UUID, otherNode->getUUID().toRfc4122());
                        identityPacket.append(individualData);

                        nodeList->writeDatagram(identityPacket, node);

                        ++_sumIdentityPackets;
                    }

                    otherNodeData->getMutex().unlock();
                }
            });

            nodeList->writeDatagram(mixedAvatarByteArray, node);

            nodeData->getMutex().unlock();
        }
    });

    _lastFrameTimestamp = QDateTime::currentMSecsSinceEpoch();
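
The packet assembly above follows a simple append-or-flush pattern: each avatar's UUID and serialized state are appended to one mixed datagram, which is flushed to the listener and trimmed back to its header whenever the next record would push it past MAX_PACKET_SIZE. A simplified, standalone sketch of that pattern follows; the function name, the send callback, and the header and packet sizes are illustrative stand-ins, not the NodeList API.

#include <QByteArray>
#include <functional>

// Append one avatar record to the shared datagram, flushing first if the
// record would overflow the maximum packet size.
void appendOrFlush(QByteArray& mixedPacket, const QByteArray& record,
                   int numHeaderBytes, int maxPacketSize,
                   const std::function<void(const QByteArray&)>& send) {
    if (record.size() + mixedPacket.size() > maxPacketSize) {
        send(mixedPacket);                  // ship what has accumulated so far
        mixedPacket.resize(numHeaderBytes); // keep only the packet header
    }
    mixedPacket.append(record);             // coalesce this avatar's data
}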

@@ -17,7 +17,6 @@
#include <QObject>
#include <QElapsedTimer>
#include "DependencyManager.h"
#include "Node.h"
#include "SimpleMovingAverage.h"


@@ -263,8 +263,10 @@ qint64 LimitedNodeList::writeDatagram(const QByteArray& datagram,
    }

    emit dataSent(destinationNode->getType(), datagram.size());

    return writeDatagram(datagram, *destinationSockAddr, destinationNode->getConnectionSecret());
    auto bytesWritten = writeDatagram(datagram, *destinationSockAddr, destinationNode->getConnectionSecret());
    // Keep track of per-destination-node bandwidth
    destinationNode->recordBytesSent(bytesWritten);
    return bytesWritten;
}

// didn't have a destinationNode to send to, return 0

@@ -151,7 +151,18 @@ public:
            functor(it->second);
        }
    }

    template<typename PredLambda, typename NodeLambda>
    void eachMatchingNode(PredLambda predicate, NodeLambda functor) {
        QReadLocker readLock(&_nodeMutex);

        for (NodeHash::const_iterator it = _nodeHash.cbegin(); it != _nodeHash.cend(); ++it) {
            if (predicate(it->second)) {
                functor(it->second);
            }
        }
    }

    template<typename BreakableNodeLambda>
    void eachNodeBreakable(BreakableNodeLambda functor) {
        QReadLocker readLock(&_nodeMutex);
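
eachMatchingNode applies the functor only to the nodes the predicate accepts, with the whole traversal held under the read lock; this is what lets AvatarMixer::broadcastAvatarData() above pull its eligibility checks out into a separate lambda. A short usage sketch follows; the nodeList variable and the counting task are made up for illustration and assume the Node/NodeType API used elsewhere in this diff.

// Count the connected agent nodes.
int agentCount = 0;
nodeList->eachMatchingNode(
    [](const SharedNodePointer& node) -> bool {
        // Only consider agents with an active socket.
        return node->getType() == NodeType::Agent && node->getActiveSocket() != nullptr;
    },
    [&](const SharedNodePointer& node) {
        Q_UNUSED(node);
        ++agentCount;
    });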

@@ -15,6 +15,7 @@
#include <UUID.h>

#include "NetworkPeer.h"
#include "BandwidthRecorder.h"

NetworkPeer::NetworkPeer() :
    _uuid(),

@@ -96,4 +97,37 @@ QDebug operator<<(QDebug debug, const NetworkPeer &peer) {
        << "- public:" << peer.getPublicSocket()
        << "- local:" << peer.getLocalSocket();
    return debug;
}
}

// FIXME this is a temporary implementation to determine if this is the right approach.
// If so, migrate the BandwidthRecorder into the NetworkPeer class
using BandwidthRecorderPtr = QSharedPointer<BandwidthRecorder>;
static QHash<QUuid, BandwidthRecorderPtr> PEER_BANDWIDTH;

BandwidthRecorder& getBandwidthRecorder(const QUuid & uuid) {
    if (!PEER_BANDWIDTH.count(uuid)) {
        PEER_BANDWIDTH.insert(uuid, BandwidthRecorderPtr(new BandwidthRecorder()));
    }
    return *PEER_BANDWIDTH[uuid].data();
}

void NetworkPeer::recordBytesSent(int count) {
    auto& bw = getBandwidthRecorder(_uuid);
    bw.updateOutboundData(0, count);
}

void NetworkPeer::recordBytesReceived(int count) {
    auto& bw = getBandwidthRecorder(_uuid);
    bw.updateInboundData(0, count);
}

float NetworkPeer::getOutboundBandwidth() {
    auto& bw = getBandwidthRecorder(_uuid);
    return bw.getAverageOutputKilobitsPerSecond(0);
}

float NetworkPeer::getInboundBandwidth() {
    auto& bw = getBandwidthRecorder(_uuid);
    return bw.getAverageInputKilobitsPerSecond(0);
}
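
With this temporary implementation, each peer's traffic is tracked by a BandwidthRecorder kept in a file-static QHash keyed by the peer's UUID: recordBytesSent() and recordBytesReceived() feed channel 0, and the two getters read back a moving-average rate in kilobits per second. A hedged sketch of the intended flow, where the destinationNode pointer and datagram are placeholders:

// From the sending side, e.g. LimitedNodeList::writeDatagram() above:
destinationNode->recordBytesSent(datagram.size());

// Later, when deciding how much avatar data to send to this node:
float kbpsOut = destinationNode->getOutboundBandwidth();   // average output on channel 0

Note that nothing in this diff removes entries from PEER_BANDWIDTH, so recorders persist for the life of the process; the FIXME above flags the plan to fold the recorder into NetworkPeer itself if the approach works out.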

@@ -54,6 +54,12 @@ public:
    int getConnectionAttempts() const { return _connectionAttempts; }
    void incrementConnectionAttempts() { ++_connectionAttempts; }
    void resetConnectionAttemps() { _connectionAttempts = 0; }

    void recordBytesSent(int count);
    void recordBytesReceived(int count);

    float getOutboundBandwidth();
    float getInboundBandwidth();

    friend QDataStream& operator<<(QDataStream& out, const NetworkPeer& peer);
    friend QDataStream& operator>>(QDataStream& in, NetworkPeer& peer);

libraries/shared/src/TryLocker.h (new file, 28 lines)

@@ -0,0 +1,28 @@
//
//  TryLocker.h
//  libraries/shared/src
//
//  Created by Brad Davis on 2015/03/16.
//  Copyright 2015 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_TryLocker_h
#define hifi_TryLocker_h

#include <QMutex>

class MutexTryLocker {
    QMutex& _mutex;
    bool _locked{ false };
public:
    MutexTryLocker(QMutex &m) : _mutex(m), _locked(m.tryLock()) {}
    ~MutexTryLocker() { if (_locked) _mutex.unlock(); }
    bool isLocked() {
        return _locked;
    }
};

#endif // hifi_TryLocker_h
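
MutexTryLocker is a small RAII wrapper around QMutex::tryLock(): the constructor attempts the lock, isLocked() reports whether it succeeded, and the destructor releases the mutex only if it was actually acquired. That is what lets the avatar mixer above skip a busy node for one frame without ever missing an unlock. A short usage sketch, with the function and mutex names chosen for illustration:

#include <QMutex>
#include <TryLocker.h>

void updateSharedStateIfIdle(QMutex& stateMutex) {
    MutexTryLocker locker(stateMutex);
    if (!locker.isLocked()) {
        return;     // someone else holds the lock; try again next frame
    }
    // ... safe to touch the shared state here; released automatically on return
}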