Mirror of https://github.com/overte-org/overte.git
Merge pull request #6527 from ZappoMan/debugEntitiesSending

Add some debugging stats to entity server

Commit 744da64c50
7 changed files with 105 additions and 0 deletions
@@ -266,3 +266,72 @@ void EntityServer::readAdditionalConfiguration(const QJsonObject& settingsSectionObject)
     tree->setWantEditLogging(wantEditLogging);
     tree->setWantTerseEditLogging(wantTerseEditLogging);
 }
+
+
+// FIXME - this stats tracking is somewhat temporary to debug the Whiteboard issues. It's not a bad
+// set of stats to have, but we'd probably want a different data structure if we keep it very long.
+// Since this version uses a single shared QMap for all senders, there could be some lock contention
+// on this QWriteLocker
+void EntityServer::trackSend(const QUuid& dataID, quint64 dataLastEdited, const QUuid& viewerNode) {
+    QWriteLocker locker(&_viewerSendingStatsLock);
+    _viewerSendingStats[viewerNode][dataID] = { usecTimestampNow(), dataLastEdited };
+}
+
+void EntityServer::trackViewerGone(const QUuid& viewerNode) {
+    QWriteLocker locker(&_viewerSendingStatsLock);
+    _viewerSendingStats.remove(viewerNode);
+}
+
+QString EntityServer::serverSubclassStats() {
+    QLocale locale(QLocale::English);
+    QString statsString;
+
+    // display memory usage stats
+    statsString += "<b>Entity Server Memory Statistics</b>\r\n";
+    statsString += QString().sprintf("EntityTreeElement size... %ld bytes\r\n", sizeof(EntityTreeElement));
+    statsString += QString().sprintf("       EntityItem size... %ld bytes\r\n", sizeof(EntityItem));
+    statsString += "\r\n\r\n";
+
+    statsString += "<b>Entity Server Sending to Viewer Statistics</b>\r\n";
+    statsString += "----- Viewer Node ID ----------------- ----- Entity ID ---------------------- "
+                   "---------- Last Sent To ---------- ---------- Last Edited -----------\r\n";
+
+    int viewers = 0;
+    const int COLUMN_WIDTH = 24;
+
+    {
+        QReadLocker locker(&_viewerSendingStatsLock);
+        quint64 now = usecTimestampNow();
+
+        for (auto viewerID : _viewerSendingStats.keys()) {
+            statsString += viewerID.toString() + "\r\n";
+
+            auto viewerData = _viewerSendingStats[viewerID];
+            for (auto entityID : viewerData.keys()) {
+                ViewerSendingStats stats = viewerData[entityID];
+
+                quint64 elapsedSinceSent = now - stats.lastSent;
+                double sentMsecsAgo = (double)(elapsedSinceSent / USECS_PER_MSEC);
+
+                quint64 elapsedSinceEdit = now - stats.lastEdited;
+                double editMsecsAgo = (double)(elapsedSinceEdit / USECS_PER_MSEC);
+
+                statsString += "    "; // the viewerID spacing
+                statsString += entityID.toString();
+                statsString += " ";
+                statsString += QString("%1 msecs ago")
+                    .arg(locale.toString((double)sentMsecsAgo).rightJustified(COLUMN_WIDTH, ' '));
+                statsString += QString("%1 msecs ago")
+                    .arg(locale.toString((double)editMsecsAgo).rightJustified(COLUMN_WIDTH, ' '));
+                statsString += "\r\n";
+            }
+            viewers++;
+        }
+    }
+    if (viewers < 1) {
+        statsString += "    no viewers... \r\n";
+    }
+    statsString += "\r\n\r\n";
+
+    return statsString;
+}
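For reference, a minimal, self-contained sketch of the locking pattern the FIXME above describes: one QReadWriteLock guarding a single shared, nested QMap of per-viewer send records. The names here (SendTracker, SendRecord, trackedEntityCount) are illustrative only and are not part of this PR. Writers take the exclusive lock, so concurrent senders serialize on it, which is exactly the contention the comment warns about; only readers can overlap.

#include <QMap>
#include <QReadLocker>
#include <QReadWriteLock>
#include <QUuid>
#include <QWriteLocker>

struct SendRecord {
    quint64 lastSent;    // usecs timestamp of the last send to this viewer
    quint64 lastEdited;  // the entity's last-edit timestamp at send time
};

class SendTracker {
public:
    void trackSend(const QUuid& viewer, const QUuid& entity, quint64 now, quint64 edited) {
        QWriteLocker locker(&_lock);              // exclusive: every sender serializes here
        _stats[viewer][entity] = { now, edited };
    }
    void trackViewerGone(const QUuid& viewer) {
        QWriteLocker locker(&_lock);              // exclusive: drop the viewer's whole sub-map
        _stats.remove(viewer);
    }
    int trackedEntityCount(const QUuid& viewer) const {
        QReadLocker locker(&_lock);               // shared: stats reporting can overlap other reads
        return _stats.value(viewer).size();
    }
private:
    mutable QReadWriteLock _lock;
    QMap<QUuid, QMap<QUuid, SendRecord>> _stats;  // one shared map for all senders
};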
@@ -21,6 +21,12 @@
 #include "EntityTree.h"
 
 /// Handles assignments of type EntityServer - sending entities to various clients.
+
+struct ViewerSendingStats {
+    quint64 lastSent;
+    quint64 lastEdited;
+};
+
 class EntityServer : public OctreeServer, public NewlyCreatedEntityHook {
     Q_OBJECT
 public:
@@ -44,6 +50,10 @@ public:
     virtual void entityCreated(const EntityItem& newEntity, const SharedNodePointer& senderNode) override;
     virtual void readAdditionalConfiguration(const QJsonObject& settingsSectionObject) override;
+
+    virtual QString serverSubclassStats();
+    virtual void trackSend(const QUuid& dataID, quint64 dataLastEdited, const QUuid& viewerNode);
+    virtual void trackViewerGone(const QUuid& viewerNode);
+
 public slots:
     void pruneDeletedEntities();
@@ -57,6 +67,9 @@ private slots:
 private:
     EntitySimulation* _entitySimulation;
     QTimer* _pruneDeletedEntitiesTimer = nullptr;
+
+    QReadWriteLock _viewerSendingStatsLock;
+    QMap<QUuid, QMap<QUuid, ViewerSendingStats>> _viewerSendingStats;
 };
 
 #endif // hifi_EntityServer_h
@@ -467,6 +467,12 @@ int OctreeSendThread::packetDistributor(OctreeQueryNode* nodeData, bool viewFrus
                         isFullScene, &nodeData->stats, _myServer->getJurisdiction(),
                         &nodeData->extraEncodeData);
+
+            // Our trackSend() function is implemented by the server subclass, and will be called back
+            // during the encodeTreeBitstream() as new entities/data elements are sent
+            params.trackSend = [this](const QUuid& dataID, quint64 dataEdited) {
+                _myServer->trackSend(dataID, dataEdited, _nodeUUID);
+            };
 
             // TODO: should this include the lock time or not? This stat is sent down to the client,
             // it seems like it may be a good idea to include the lock time as part of the encode time
             // are reported to client. Since you can encode without the lock
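The wiring above relies on the std::function member added to the encode params later in this diff (the @@ -176,6 +176,8 @@ hunk): it defaults to a no-op lambda, so the encoder can invoke it unconditionally and only callers that care install a real callback. Below is a compilable sketch of that pattern; EncodeParamsSketch and encodeSomething are illustrative stand-ins, not names from the PR.

#include <QDebug>
#include <QUuid>
#include <functional>

struct EncodeParamsSketch {
    // Defaults to a do-nothing lambda so the encoder never has to null-check it.
    std::function<void(const QUuid& dataID, quint64 itemLastEdited)> trackSend { [](const QUuid&, quint64) {} };
};

// Stand-in for the encode pass: it reports every item it actually packs.
void encodeSomething(const EncodeParamsSketch& params) {
    QUuid sentEntity = QUuid::createUuid();
    quint64 lastEdited = 12345;
    params.trackSend(sentEntity, lastEdited);   // safe even when nobody installed a callback
}

int main() {
    EncodeParamsSketch params;
    // The caller that cares (the send thread, above) swaps in a capturing lambda.
    params.trackSend = [](const QUuid& dataID, quint64 dataEdited) {
        qDebug() << "sent" << dataID << "last edited" << dataEdited;
    };
    encodeSomething(params);
    return 0;
}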
@@ -821,6 +821,11 @@ bool OctreeServer::handleHTTPRequest(HTTPConnection* connection, const QUrl& url
             .arg(locale.toString((uint)checkSum).rightJustified(16, ' '));
 
         statsString += "\r\n\r\n";
+
+        statsString += serverSubclassStats();
+
+        statsString += "\r\n\r\n";
+
         statsString += "</pre>\r\n";
         statsString += "</doc></html>";
@@ -1179,6 +1184,8 @@ void OctreeServer::nodeKilled(SharedNodePointer node) {
     if (usecsElapsed > 1000) {
         qDebug() << qPrintable(_safeServerName) << "server nodeKilled() took: " << usecsElapsed << " usecs for node:" << *node;
     }
+
+    trackViewerGone(node->getUUID());
 }
 
 void OctreeServer::forceNodeShutdown(SharedNodePointer node) {
@@ -79,6 +79,9 @@ public:
     virtual void beforeRun() { }
     virtual bool hasSpecialPacketsToSend(const SharedNodePointer& node) { return false; }
     virtual int sendSpecialPackets(const SharedNodePointer& node, OctreeQueryNode* queryNode, int& packetsSent) { return 0; }
+    virtual QString serverSubclassStats() { return QString(); }
+    virtual void trackSend(const QUuid& dataID, quint64 dataLastEdited, const QUuid& viewerNode) { }
+    virtual void trackViewerGone(const QUuid& viewerNode) { }
 
     static float SKIP_TIME; // use this for trackXXXTime() calls for non-times
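These base-class defaults are what let the generic octree code call serverSubclassStats() and trackViewerGone() without knowing which subclass is running. A small sketch of that hook pattern follows, under assumed, simplified names (BaseServerSketch, EntityServerSketch) that are not from this PR.

#include <QList>
#include <QString>
#include <QUuid>

class BaseServerSketch {
public:
    virtual ~BaseServerSketch() = default;
    virtual QString serverSubclassStats() { return QString(); }   // default: nothing extra to report
    virtual void trackViewerGone(const QUuid&) { }                // default: no per-viewer bookkeeping

    void nodeKilled(const QUuid& nodeID) {
        // ... shared node cleanup ...
        trackViewerGone(nodeID);   // subclass hook; a no-op for servers that don't track viewers
    }
};

class EntityServerSketch : public BaseServerSketch {
public:
    QString serverSubclassStats() override { return QStringLiteral("<b>Entity Server ...</b>\r\n"); }
    void trackViewerGone(const QUuid& viewer) override { _viewers.removeAll(viewer); }
private:
    QList<QUuid> _viewers;
};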
@@ -311,6 +311,11 @@ OctreeElement::AppendState EntityItem::appendEntityData(OctreePacketData* packet
             entityTreeElementExtraEncodeData->entities.insert(getEntityItemID(), propertiesDidntFit);
         }
+
+    // if any part of our entity was sent, call trackSend
+    if (appendState != OctreeElement::NONE) {
+        params.trackSend(getID(), getLastEdited());
+    }
 
     return appendState;
 }
@@ -176,6 +176,8 @@ public:
             case OCCLUDED: return QString("OCCLUDED"); break;
         }
     }
+
+    std::function<void(const QUuid& dataID, quint64 itemLastEdited)> trackSend { [](const QUuid&, quint64){} };
 };
 
 class ReadElementBufferToTreeArgs {