Merge pull request #7390 from Atlante45/feat/udp-flood-protection

Server side bandwidth dial

commit 8c290ea6b9
11 changed files with 68 additions and 18 deletions
@@ -77,6 +77,18 @@ void AssetServer::completeSetup() {
     auto assetServerObject = settingsObject[ASSET_SERVER_SETTINGS_KEY].toObject();
 
+    static const QString MAX_BANDWIDTH_OPTION = "max_bandwidth";
+    auto maxBandwidthValue = assetServerObject[MAX_BANDWIDTH_OPTION];
+    auto maxBandwidthFloat = maxBandwidthValue.toDouble(-1);
+
+    if (maxBandwidthFloat > 0.0) {
+        const int BYTES_PER_MEGABITS = (1024 * 1024) / 8;
+        int maxBandwidth = maxBandwidthFloat * BYTES_PER_MEGABITS;
+        nodeList->setConnectionMaxBandwidth(maxBandwidth);
+        qInfo() << "Set maximum bandwith per connection to" << maxBandwidthFloat << "Mb/s."
+                   " (" << maxBandwidth << "bytes/sec)";
+    }
+
     // get the path to the asset folder from the domain server settings
     static const QString ASSETS_PATH_OPTION = "assets_path";
     auto assetsJSONValue = assetServerObject[ASSETS_PATH_OPTION];
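The unit conversion in this hunk is easy to misread, so here it is spelled out as a standalone sketch (illustration only, not code from the commit): one megabit is taken as 1024 * 1024 bits, dividing by 8 gives bytes, so the "10.0" placeholder from the settings becomes 1,310,720 bytes/sec.

    #include <iostream>

    int main() {
        // Same conversion as BYTES_PER_MEGABITS in the hunk above: 1 Mb = 1024 * 1024 bits = 131072 bytes.
        const int BYTES_PER_MEGABITS = (1024 * 1024) / 8;
        double maxBandwidthFloat = 10.0;                            // example value: the settings placeholder of 10.0 Mb/s
        int maxBandwidth = maxBandwidthFloat * BYTES_PER_MEGABITS;  // 1310720 bytes/sec
        std::cout << maxBandwidthFloat << " Mb/s -> " << maxBandwidth << " bytes/sec" << std::endl;
        return 0;
    }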
@@ -186,6 +186,15 @@
             "help": "The path to the directory assets are stored in.<br/>If this path is relative, it will be relative to the application data directory.<br/>If you change this path you will need to manually copy any existing assets from the previous directory.",
             "default": "",
             "advanced": true
         },
+        {
+            "name": "max_bandwidth",
+            "type": "double",
+            "label": "Max Bandwidth Per User",
+            "help": "The maximum upstream bandwidth each user can use (in Mb/s).",
+            "placeholder": "10.0",
+            "default": "",
+            "advanced": true
+        }
     ]
 },
@@ -219,6 +219,8 @@ public:
     udt::Socket::StatsVector sampleStatsForAllConnections() { return _nodeSocket.sampleStatsForAllConnections(); }
 
+    void setConnectionMaxBandwidth(int maxBandwidth) { _nodeSocket.setConnectionMaxBandwidth(maxBandwidth); }
+
 public slots:
     void reset();
     void eraseAllNodes();
@@ -20,13 +20,19 @@ using namespace std::chrono;
 
 static const double USECS_PER_SECOND = 1000000.0;
 
+void CongestionControl::setMaxBandwidth(int maxBandwidth) {
+    _maxBandwidth = maxBandwidth;
+    setPacketSendPeriod(_packetSendPeriod);
+}
+
 void CongestionControl::setPacketSendPeriod(double newSendPeriod) {
     Q_ASSERT_X(newSendPeriod >= 0, "CongestionControl::setPacketPeriod", "Can not set a negative packet send period");
 
-    if (_maxBandwidth > 0) {
+    auto maxBandwidth = _maxBandwidth.load();
+    if (maxBandwidth > 0) {
         // anytime the packet send period is about to be increased, make sure it stays below the minimum period,
         // calculated based on the maximum desired bandwidth
-        double minPacketSendPeriod = USECS_PER_SECOND / (((double) _maxBandwidth) / _mss);
+        double minPacketSendPeriod = USECS_PER_SECOND / (((double) maxBandwidth) / _mss);
         _packetSendPeriod = std::max(newSendPeriod, minPacketSendPeriod);
     } else {
         _packetSendPeriod = newSendPeriod;
@@ -39,7 +45,7 @@ DefaultCC::DefaultCC() :
     _mss = udt::MAX_PACKET_SIZE_WITH_UDP_HEADER;
 
     _congestionWindowSize = 16.0;
-    _packetSendPeriod = 1.0;
+    setPacketSendPeriod(1.0);
 }
 
 void DefaultCC::onACK(SequenceNumber ackNum) {
@@ -73,10 +79,10 @@ void DefaultCC::onACK(SequenceNumber ackNum) {
 
             if (_receiveRate > 0) {
                 // if we have a valid receive rate we set the send period to whatever the receive rate dictates
-                _packetSendPeriod = USECS_PER_SECOND / _receiveRate;
+                setPacketSendPeriod(USECS_PER_SECOND / _receiveRate);
             } else {
                 // no valid receive rate, packet send period is dictated by estimated RTT and current congestion window size
-                _packetSendPeriod = (_rtt + synInterval()) / _congestionWindowSize;
+                setPacketSendPeriod((_rtt + synInterval()) / _congestionWindowSize);
             }
         }
     } else {
@@ -148,8 +154,8 @@ void DefaultCC::onLoss(SequenceNumber rangeStart, SequenceNumber rangeEnd) {
     if (rangeStart > _lastDecreaseMaxSeq) {
 
         _lastDecreasePeriod = _packetSendPeriod;
 
-        _packetSendPeriod = ceil(_packetSendPeriod * INTER_PACKET_ARRIVAL_INCREASE);
+        setPacketSendPeriod(ceil(_packetSendPeriod * INTER_PACKET_ARRIVAL_INCREASE));
 
         // use EWMA to update the average number of NAKs per congestion
         static const double NAK_EWMA_ALPHA = 0.125;
@@ -175,7 +181,7 @@ void DefaultCC::onLoss(SequenceNumber rangeStart, SequenceNumber rangeEnd) {
         // there have been fewer than MAX_DECREASES_PER_CONGESTION_EPOCH AND this NAK matches the random count at which we
         // decided we would decrease the packet send period
 
-        _packetSendPeriod = ceil(_packetSendPeriod * INTER_PACKET_ARRIVAL_INCREASE);
+        setPacketSendPeriod(ceil(_packetSendPeriod * INTER_PACKET_ARRIVAL_INCREASE));
         _lastDecreaseMaxSeq = _sendCurrSeqNum;
     }
 }
@@ -198,12 +204,12 @@ void DefaultCC::stopSlowStart() {
 
     if (_receiveRate > 0) {
         // Set the sending rate to the receiving rate.
-        _packetSendPeriod = USECS_PER_SECOND / _receiveRate;
+        setPacketSendPeriod(USECS_PER_SECOND / _receiveRate);
     } else {
         // If no receiving rate is observed, we have to compute the sending
         // rate according to the current window size, and decrease it
         // using the method below.
-        _packetSendPeriod = _congestionWindowSize / (_rtt + synInterval());
+        setPacketSendPeriod(_congestionWindowSize / (_rtt + synInterval()));
     }
 }
 
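The throttle works by never letting the inter-packet send period fall below the period implied by the configured byte rate and the maximum segment size. Spelled out as a standalone helper (an illustrative sketch reusing the names from the diff, not code from the commit):

    #include <algorithm>

    // maxBandwidth is in bytes/sec and mss is the maximum packet size in bytes, as in the diff above.
    double clampedSendPeriod(double requestedPeriodUsecs, int maxBandwidth, int mss) {
        const double USECS_PER_SECOND = 1000000.0;
        if (maxBandwidth <= 0) {
            return requestedPeriodUsecs;                // no cap configured, pass the requested period through
        }
        // maxBandwidth / mss is the maximum packets per second the cap allows, so the period
        // between packets may never drop below one second divided by that rate (in microseconds).
        double minPacketSendPeriod = USECS_PER_SECOND / (((double) maxBandwidth) / mss);
        return std::max(requestedPeriodUsecs, minPacketSendPeriod);
    }

With a 10 Mb/s cap (1,310,720 bytes/sec) and, for illustration, an MSS of roughly 1,500 bytes, the send period can never drop below about 1,144 microseconds, i.e. around 874 full-size packets per second.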
@@ -12,6 +12,7 @@
 #ifndef hifi_CongestionControl_h
 #define hifi_CongestionControl_h
 
+#include <atomic>
 #include <memory>
 #include <vector>
 #include <memory>
@@ -37,6 +38,7 @@ public:
     virtual ~CongestionControl() {}
 
     int synInterval() const { return _synInterval; }
+    void setMaxBandwidth(int maxBandwidth);
 
     virtual void init() {}
     virtual void onACK(SequenceNumber ackNum) {}
@@ -49,7 +51,6 @@ protected:
     void setMSS(int mss) { _mss = mss; }
     void setMaxCongestionWindowSize(int window) { _maxCongestionWindowSize = window; }
     void setBandwidth(int bandwidth) { _bandwidth = bandwidth; }
-    void setMaxBandwidth(int maxBandwidth) { _maxBandwidth = maxBandwidth; }
     virtual void setInitialSendSequenceNumber(SequenceNumber seqNum) = 0;
     void setSendCurrentSequenceNumber(SequenceNumber seqNum) { _sendCurrSeqNum = seqNum; }
     void setReceiveRate(int rate) { _receiveRate = rate; }
@@ -60,7 +61,7 @@ protected:
     double _congestionWindowSize { 16.0 }; // Congestion window size, in packets
 
     int _bandwidth { 0 }; // estimated bandwidth, packets per second
-    int _maxBandwidth { -1 }; // Maximum desired bandwidth, packets per second
+    std::atomic<int> _maxBandwidth { -1 }; // Maximum desired bandwidth, bytes per second
     double _maxCongestionWindowSize { 0.0 }; // maximum cwnd size, in packets
 
     int _mss { 0 }; // Maximum Packet Size, including all packet headers
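Switching _maxBandwidth from a plain int to std::atomic<int> suggests the cap is written from the thread that applies settings while the send path reads it; the send path takes one snapshot via load() so the value cannot change mid-computation. A minimal sketch of that pattern (hypothetical class, not part of the commit):

    #include <atomic>

    class BandwidthCap {
    public:
        // May be called from a settings/control thread at any time.
        void set(int bytesPerSecond) { _maxBandwidth = bytesPerSecond; }   // atomic store

        // Called on the send path; one atomic load gives a consistent value for this call.
        bool isCapped() const { return _maxBandwidth.load() > 0; }

    private:
        std::atomic<int> _maxBandwidth { -1 };  // bytes per second, -1 means "no cap"
    };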
@@ -80,6 +80,10 @@ void Connection::resetRTT() {
     _rttVariance = _rtt / 2;
 }
 
+void Connection::setMaxBandwidth(int maxBandwidth) {
+    _congestionControl->setMaxBandwidth(maxBandwidth);
+}
+
 SendQueue& Connection::getSendQueue() {
     if (!_sendQueue) {
@@ -76,6 +76,8 @@ public:
 
     HifiSockAddr getDestination() const { return _destination; }
 
+    void setMaxBandwidth(int maxBandwidth);
+
 signals:
     void packetSent();
     void connectionInactive(const HifiSockAddr& sockAddr);
@@ -176,7 +176,9 @@ Connection& Socket::findOrCreateConnection(const HifiSockAddr& sockAddr) {
     auto it = _connectionsHash.find(sockAddr);
 
     if (it == _connectionsHash.end()) {
-        auto connection = std::unique_ptr<Connection>(new Connection(this, sockAddr, _ccFactory->create()));
+        auto congestionControl = _ccFactory->create();
+        congestionControl->setMaxBandwidth(_maxBandwidth);
+        auto connection = std::unique_ptr<Connection>(new Connection(this, sockAddr, std::move(congestionControl)));
 
         // we queue the connection to cleanup connection in case it asks for it during its own rate control sync
         QObject::connect(connection.get(), &Connection::connectionInactive, this, &Socket::cleanupConnection);
@@ -350,6 +352,17 @@ void Socket::setCongestionControlFactory(std::unique_ptr<CongestionControlVirtua
     _synInterval = _ccFactory->synInterval();
 }
 
+
+void Socket::setConnectionMaxBandwidth(int maxBandwidth) {
+    qInfo() << "Setting socket's maximum bandwith to" << maxBandwidth << ". ("
+            << _connectionsHash.size() << "live connections)";
+    _maxBandwidth = maxBandwidth;
+    for (auto& pair : _connectionsHash) {
+        auto& connection = pair.second;
+        connection->setMaxBandwidth(_maxBandwidth);
+    }
+}
+
 ConnectionStats::Stats Socket::sampleStatsForConnection(const HifiSockAddr& destination) {
     auto it = _connectionsHash.find(destination);
     if (it != _connectionsHash.end()) {
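The new Socket::setConnectionMaxBandwidth covers both paths a cap has to reach: it is pushed into every live connection immediately, and it is remembered in _maxBandwidth so that findOrCreateConnection can seed connections created later. A compact sketch of that pattern with hypothetical stand-in types (not the udt classes themselves):

    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Conn {
        int maxBandwidth { -1 };
        void setMaxBandwidth(int bw) { maxBandwidth = bw; }
    };

    struct Sock {
        int _maxBandwidth { -1 };
        std::unordered_map<std::string, std::unique_ptr<Conn>> _connections;

        Conn& findOrCreate(const std::string& key) {
            auto it = _connections.find(key);
            if (it == _connections.end()) {
                auto conn = std::make_unique<Conn>();
                conn->setMaxBandwidth(_maxBandwidth);        // seed new connections with the current cap
                it = _connections.emplace(key, std::move(conn)).first;
            }
            return *it->second;
        }

        void setConnectionMaxBandwidth(int bw) {
            _maxBandwidth = bw;                              // remembered for future connections
            for (auto& pair : _connections) {                // pushed to all live connections
                pair.second->setMaxBandwidth(bw);
            }
        }
    };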
@@ -72,6 +72,7 @@ public:
         { _unfilteredHandlers[senderSockAddr] = handler; }
 
     void setCongestionControlFactory(std::unique_ptr<CongestionControlVirtualFactory> ccFactory);
+    void setConnectionMaxBandwidth(int maxBandwidth);
 
     void messageReceived(std::unique_ptr<Packet> packet);
     void messageFailed(Connection* connection, Packet::MessageNumber messageNumber);
@@ -109,8 +110,10 @@ private:
     std::unordered_map<HifiSockAddr, SequenceNumber> _unreliableSequenceNumbers;
     std::unordered_map<HifiSockAddr, std::unique_ptr<Connection>> _connectionsHash;
 
-    int _synInterval = 10; // 10ms
-    QTimer* _synTimer;
+    int _synInterval { 10 }; // 10ms
+    QTimer* _synTimer { nullptr };
+
+    int _maxBandwidth { -1 };
 
     std::unique_ptr<CongestionControlVirtualFactory> _ccFactory { new CongestionControlFactory<DefaultCC>() };
@@ -82,7 +82,6 @@ void LogHandler::flushRepeatedMessages() {
 }
 
 QString LogHandler::printMessage(LogMsgType type, const QMessageLogContext& context, const QString& message) {
-
     if (message.isEmpty()) {
         return QString();
     }
@@ -114,7 +114,6 @@ QScriptValue QmlWindowClass::internalConstructor(const QString& qmlSource,
         }
     } else {
         auto argumentObject = context->argument(0);
-        qDebug() << argumentObject.toString();
         if (!argumentObject.property(TITLE_PROPERTY).isUndefined()) {
             title = argumentObject.property(TITLE_PROPERTY).toString();
         }