Mirror of https://github.com/JulianGro/overte.git
changed fmax to std::max
parent b0f8e21d06
commit 124250b2c2
1 changed file with 26 additions and 25 deletions
@@ -8,6 +8,7 @@
 //  Threaded or non-threaded packet sender.
 //
 
+#include <algorithm>
 #include <math.h>
 #include <stdint.h>
 
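For background on why the hunk above adds <algorithm>: std::max is declared there, while fmax comes from <math.h>. A small standalone sketch of the practical difference (illustrative code, not part of the commit):

#include <algorithm> // std::max: a template; both arguments must have the same type
#include <cmath>     // fmax: C math function; the C version takes and returns double
#include <cstdio>

int main() {
    float callsPerSecond = 4.0f;
    const float MIN_CALL_INTERVALS_PER_RESET = 5.0f;

    // Both produce 5 here, but std::max never leaves float, while C's fmax
    // round-trips through double (C++'s <cmath> does add float overloads).
    float viaFmax = fmax(callsPerSecond, MIN_CALL_INTERVALS_PER_RESET);
    float viaStdMax = std::max(callsPerSecond, MIN_CALL_INTERVALS_PER_RESET);
    printf("%f %f\n", viaFmax, viaStdMax);

    // The trade-off: fmax freely mixes numeric types via promotion, while
    // std::max(3, 5.0f) does not compile unless written as std::max<float>(3, 5.0f).
    // With matching float arguments, as in this file, the swap is behavior-neutral.
    return 0;
}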
@@ -26,7 +27,7 @@ const int PacketSender::MINIMAL_SLEEP_INTERVAL = (USECS_PER_SECOND / TARGET_FPS)
 
 const int AVERAGE_CALL_TIME_SAMPLES = 10;
 
-PacketSender::PacketSender(PacketSenderNotify* notify, int packetsPerSecond) :
+PacketSender::PacketSender(PacketSenderNotify* notify, int packetsPerSecond) :
     _packetsPerSecond(packetsPerSecond),
     _usecsPerProcessCallHint(0),
     _lastProcessCallTime(0),
@@ -66,19 +67,19 @@ bool PacketSender::process() {
 
 bool PacketSender::threadedProcess() {
     bool hasSlept = false;
 
     if (_lastSendTime == 0) {
         _lastSendTime = usecTimestampNow();
     }
 
     // in threaded mode, we keep running and just empty our packet queue, sleeping enough to keep our PPS on target
     while (_packets.size() > 0) {
         // Recalculate our SEND_INTERVAL_USECS each time, in case the caller has changed it on us.
-        int packetsPerSecondTarget = (_packetsPerSecond > MINIMUM_PACKETS_PER_SECOND)
+        int packetsPerSecondTarget = (_packetsPerSecond > MINIMUM_PACKETS_PER_SECOND)
                 ? _packetsPerSecond : MINIMUM_PACKETS_PER_SECOND;
 
         uint64_t intervalBetweenSends = USECS_PER_SECOND / packetsPerSecondTarget;
-        uint64_t sleepInterval = (intervalBetweenSends > SENDING_INTERVAL_ADJUST) ?
+        uint64_t sleepInterval = (intervalBetweenSends > SENDING_INTERVAL_ADJUST) ?
                 intervalBetweenSends - SENDING_INTERVAL_ADJUST : intervalBetweenSends;
 
         // We'll sleep before we send; this way, we can set our last send time to be our ACTUAL last send time
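The interval arithmetic in this hunk reduces to USECS_PER_SECOND / PPS, minus a small fudge factor. A minimal sketch with worked numbers; SENDING_INTERVAL_ADJUST's real value is defined elsewhere in this file, so the 200 used here is only a placeholder:

#include <cstdint>

static const uint64_t USECS_PER_SECOND = 1000 * 1000;
static const uint64_t SENDING_INTERVAL_ADJUST = 200; // placeholder value, in usecs

// Mirrors the sleep-interval math above: shave a little off the ideal
// interval, presumably to leave room for the send work itself so we
// don't drift below the target PPS.
uint64_t sleepIntervalFor(int packetsPerSecondTarget) {
    uint64_t intervalBetweenSends = USECS_PER_SECOND / packetsPerSecondTarget;
    return (intervalBetweenSends > SENDING_INTERVAL_ADJUST)
            ? intervalBetweenSends - SENDING_INTERVAL_ADJUST
            : intervalBetweenSends;
}

// e.g. a 200 PPS target gives 1000000 / 200 = 5000 usecs between sends,
// so sleepIntervalFor(200) == 5000 - 200 == 4800 usecs with this placeholder.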
@@ -95,10 +96,10 @@ bool PacketSender::threadedProcess() {
             usleep(usecToSleep);
             hasSlept = true;
         }
 
         // call our non-threaded version of ourselves
         bool keepRunning = nonThreadedProcess();
 
         if (!keepRunning) {
             break;
         }
@@ -122,9 +123,9 @@ bool PacketSender::threadedProcess() {
 // just track our call rate (in order to predict our sends per call) but we won't actually send any packets.
 //
 // When we are called less frequently than we have packets to send, we will send enough packets per call to keep up with our
-// target PPS.
+// target PPS.
 //
-// We also keep a running total of packets sent over multiple calls to process() so that we can adjust up or down for
+// We also keep a running total of packets sent over multiple calls to process() so that we can adjust up or down for
 // possible rounding error that would occur if we only considered whole integer packet counts per call to process
 bool PacketSender::nonThreadedProcess() {
     uint64_t now = usecTimestampNow();
@@ -136,7 +137,7 @@ bool PacketSender::nonThreadedProcess() {
     const uint64_t MINIMUM_POSSIBLE_CALL_TIME = 10; // in usecs
     const uint64_t USECS_PER_SECOND = 1000 * 1000;
     const float ZERO_RESET_CALLS_PER_SECOND = 1; // used as a guard against divide by zero
 
     // keep track of our process call times, so we have a reliable account of how often our caller calls us
     uint64_t elapsedSinceLastCall = now - _lastProcessCallTime;
     _lastProcessCallTime = now;
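The averageCallTime used below comes from smoothing these elapsed times; the smoothing code itself is not in this diff. A hypothetical sketch of a running average sized by AVERAGE_CALL_TIME_SAMPLES (the class and its shape are assumptions, not the commit's actual implementation):

#include <cstdint>

// Hypothetical stand-in for the call-time smoothing implied by
// AVERAGE_CALL_TIME_SAMPLES (10): a running mean that, once warmed up,
// weights each new sample at 1/10.
class CallTimeAverage {
public:
    void addSample(uint64_t usecs) {
        const int AVERAGE_CALL_TIME_SAMPLES = 10;
        if (_sampleCount < AVERAGE_CALL_TIME_SAMPLES) {
            _sampleCount++;
        }
        // running-mean update: pull the average toward the new sample
        _average += ((float)usecs - _average) / (float)_sampleCount;
    }
    float average() const { return _average; }
private:
    int _sampleCount = 0;
    float _average = 0.0f;
};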
@@ -152,22 +153,22 @@ bool PacketSender::nonThreadedProcess() {
 
     if (_packets.size() == 0) {
         // in non-threaded mode, if there's nothing to do, just return; keep running till they terminate us
-        return isStillRunning();
+        return isStillRunning();
     }
 
     // This only happens once, the first time we get this far... so we can use it as an accurate initialization
-    // point for these important timing variables
+    // point for these important timing variables
     if (_lastPPSCheck == 0) {
         _lastPPSCheck = now;
         // pretend like our lifetime began one call cycle ago; this makes our lifetime PPS start out most accurately
         _started = now - (uint64_t)averageCallTime;
     }
 
 
     float averagePacketsPerCall = 0; // might be less than 1, if our caller calls us more frequently than the target PPS
     int packetsSentThisCall = 0;
     int packetsToSendThisCall = 0;
 
     // Since we're in non-threaded mode, we need to determine how many packets to send per call to process
     // based on how often we get called... We do this by keeping a running average of our call times, and we determine
     // how many packets to send per call
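To make the next hunk's call-rate arithmetic concrete before reading it, here is one worked pass using the same 200pps / 60-calls-per-second numbers its comments use, assuming callsPerSecond is derived as USECS_PER_SECOND / averageCallTime, which the surrounding code implies:

// Worked numbers for the packets-per-call calculation below (200 PPS target,
// caller running at ~60Hz, i.e. averageCallTime ~= 16667 usecs):
//
//     callsPerSecond        = USECS_PER_SECOND / averageCallTime
//                           = 1000000 / 16667   ~= 60
//     averagePacketsPerCall = _packetsPerSecond / callsPerSecond
//                           = 200 / 60          ~= 3.33
//     packetsToSendThisCall = (int)averagePacketsPerCall = 3
//
// The truncated 0.33 packets per call is what the check-interval bookkeeping
// further down recovers, bumping some calls from 3 packets to 4.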
@@ -186,40 +187,40 @@ bool PacketSender::nonThreadedProcess() {
     if (callsPerSecond == 0) {
         callsPerSecond = ZERO_RESET_CALLS_PER_SECOND;
     }
 
     // This is the average number of packets per call...
     averagePacketsPerCall = _packetsPerSecond / callsPerSecond;
     packetsToSendThisCall = averagePacketsPerCall;
 
     // if we get called more than once per second, we want to mostly divide the packets evenly across the calls...
     // but we want to track the remainder and make sure over the course of a second, we are sending the target PPS
-    // e.g.
+    // e.g.
     //     200pps called 60 times per second...
     //     200/60 = 3.333... so really...
     //     each call we should send 3
     //     every 3rd call we should send 4...
     //     3,3,4,3,3,4...3,3,4 = 200...
 
     // if we get called less than once per second, then we want to send more than our PPS each time...
     // e.g.
     //     200pps called every 1332.5ms
     //     200 / (1000/1332.5) = 200/(0.7505) = 266.5 packets per call
-    // so...
+    // so...
     //     every other call we should send 266 packets
     //     then on the next call we should send 267 packets
 
     // So no matter whether we're getting called more or less than once per second, we still need to do some bookkeeping
     // to make sure we send a few extra packets to even out our flow rate.
     uint64_t elapsedSinceLastCheck = now - _lastPPSCheck;
 
     // we might want to tune this in the future and only check after a certain number of call intervals; for now we check
     // each time and adjust accordingly
     const float CALL_INTERVALS_TO_CHECK = 1;
     const float MIN_CALL_INTERVALS_PER_RESET = 5;
 
     // we will reset our check PPS and time each second (callsPerSecond call intervals), or at least every 5 calls if we
     // get called less frequently than 5 times per second; this gives us sufficient smoothing in our packet adjustments
-    float callIntervalsPerReset = fmax(callsPerSecond, MIN_CALL_INTERVALS_PER_RESET);
+    float callIntervalsPerReset = std::max(callsPerSecond, MIN_CALL_INTERVALS_PER_RESET);
 
     if (elapsedSinceLastCheck > (averageCallTime * CALL_INTERVALS_TO_CHECK)) {
         float ppsOverCheckInterval = (float)_packetsOverCheckInterval;
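The 3,3,4 pattern in the comments above falls out of carrying the fractional remainder between calls, Bresenham-style. A standalone sketch of that bookkeeping in exact integer form (the commit's own code spreads the correction over a check interval instead):

#include <cstdio>

int main() {
    const int packetsPerSecond = 200;
    const int callsPerSecond = 60;

    int carry = 0; // undelivered packet fraction, in 1/callsPerSecond units
    int total = 0;
    for (int call = 0; call < callsPerSecond; call++) {
        carry += packetsPerSecond;
        int sendNow = carry / callsPerSecond; // yields 3,3,4,3,3,4,... for 200/60
        carry -= sendNow * callsPerSecond;
        total += sendNow;
    }
    printf("%d\n", total); // prints 200: the per-second total is exact
    return 0;
}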
@@ -232,7 +233,7 @@ bool PacketSender::nonThreadedProcess() {
         int adjust = ppsOverCheckInterval - ppsExpectedForCheckInterval;
         packetsToSendThisCall -= adjust;
     }
 
     // now, do we want to reset the check interval? we don't want to completely reset, because we would still have
     // a rounding error. instead, we check to see that we've passed the reset interval (which is much larger than
     // the check interval), and on those reset intervals we take the second half average and keep that for the next
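Worked numbers for the adjust correction above (the figures are illustrative, not from the codebase):

// Suppose the check interval covered ~0.1s at a 200 PPS target, so
// ppsExpectedForCheckInterval = 20, but rounding meant 23 packets actually
// went out in that window:
//
//     adjust = 23 - 20 = 3
//     packetsToSendThisCall -= 3;    // send 3 fewer now, drifting back on target
//
// An undershoot works the same way: 18 sent gives adjust = -2, and the
// subtraction adds 2 extra packets to this call.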
@@ -245,7 +246,7 @@ bool PacketSender::nonThreadedProcess() {
             elapsedSinceLastCheck = now - _lastPPSCheck;
         }
     }
 
     int packetsLeft = _packets.size();
 
     // Now that we know how many packets to send this call to process, just send them.