Merge pull request #3189 from wangyix/master
Made Audio.cpp always push a whole number of frames to the audio output; added audio input buffer stats to interface
commit 01ebefbffd
4 changed files with 48 additions and 21 deletions
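The interface-stats half of this change follows one pattern throughout the diff: each tracked buffer gets a MovingMinMaxAvg<int> member that is updated roughly once per second and read back as a windowed average for the stats overlay. The sketch below is a simplified, self-contained stand-in for that pattern, not the project's MovingMinMaxAvg class: it keeps one sample per interval and averages the last N of them, which is the behaviour the diff relies on. The class name, the fake readings, and the 10-interval window are illustrative assumptions.

    #include <cstdio>
    #include <deque>
    #include <numeric>

    // Simplified stand-in for the project's MovingMinMaxAvg<int>: keeps the last
    // `windowIntervals` samples (one sample per interval here) and averages them.
    class WindowedAverage {
    public:
        explicit WindowedAverage(int windowIntervals) : _capacity(windowIntervals) {}
        void update(int sample) {
            _samples.push_back(sample);
            if ((int)_samples.size() > _capacity) { _samples.pop_front(); }
        }
        float getWindowAverage() const {
            if (_samples.empty()) { return 0.0f; }
            return std::accumulate(_samples.begin(), _samples.end(), 0) / (float)_samples.size();
        }
    private:
        int _capacity;
        std::deque<int> _samples;
    };

    int main() {
        WindowedAverage inputFramesAvailableStats(10);       // ~10 one-second intervals
        for (int second = 0; second < 12; ++second) {
            inputFramesAvailableStats.update(second % 4);     // pretend frames-available readings
        }
        printf("avail_avg_10s ~ %.1f\n", inputFramesAvailableStats.getWindowAverage());
        // prints: avail_avg_10s ~ 1.7
        return 0;
    }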
@@ -130,7 +130,8 @@ Audio::Audio(int16_t initialJitterBufferSamples, QObject* parent) :
     _outgoingAvatarAudioSequenceNumber(0),
     _incomingMixedAudioSequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH),
     _interframeTimeGapStats(TIME_GAPS_STATS_INTERVAL_SAMPLES, TIME_GAP_STATS_WINDOW_INTERVALS),
-    _ringBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _inputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
+    _outputRingBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _audioOutputBufferFramesAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS)
 {
     // clear the array of locally injected samples
@@ -795,7 +796,7 @@ AudioStreamStats Audio::getDownstreamAudioStreamStats() const {
     stats._timeGapWindowAverage = _interframeTimeGapStats.getWindowAverage();

     stats._ringBufferFramesAvailable = _ringBuffer.framesAvailable();
-    stats._ringBufferFramesAvailableAverage = _ringBufferFramesAvailableStats.getWindowAverage();
+    stats._ringBufferFramesAvailableAverage = _outputRingBufferFramesAvailableStats.getWindowAverage();
     stats._ringBufferDesiredJitterBufferFrames = getDesiredJitterBufferFrames();
     stats._ringBufferStarveCount = _starveCount;
     stats._ringBufferConsecutiveNotMixedCount = _consecutiveNotMixedCount;
@@ -810,9 +811,11 @@ AudioStreamStats Audio::getDownstreamAudioStreamStats() const {

 void Audio::sendDownstreamAudioStatsPacket() {

+    _inputRingBufferFramesAvailableStats.update(getInputRingBufferFramesAvailable());
+
     // since this function is called every second, we'll sample the number of audio frames available here.
-    _ringBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
-    _audioOutputBufferFramesAvailableStats.update(getFramesAvailableInAudioOutputBuffer());
+    _outputRingBufferFramesAvailableStats.update(_ringBuffer.framesAvailable());
+    _audioOutputBufferFramesAvailableStats.update(getOutputRingBufferFramesAvailable());

     // push the current seq number stats into history, which moves the history window forward 1s
     // (since that's how often pushStatsToHistory() is called)
@@ -974,8 +977,9 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
         numNetworkOutputSamples = _ringBuffer.samplesAvailable();
     } else {
-        int numSamplesAudioOutputRoomFor = _audioOutput->bytesFree() / sizeof(int16_t);
-        numNetworkOutputSamples = std::min(_ringBuffer.samplesAvailable(), (int)(numSamplesAudioOutputRoomFor * networkOutputToOutputRatio));
+        // make sure to push a whole number of frames to the audio output
+        int numFramesAudioOutputRoomFor = _audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio / _ringBuffer.getNumFrameSamples();
+        numNetworkOutputSamples = std::min(_ringBuffer.samplesAvailable(), numFramesAudioOutputRoomFor * _ringBuffer.getNumFrameSamples());
     }

     // if there is data in the ring buffer and room in the audio output, decide what to do
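The reworked else-branch above is the heart of the "whole number of frames" fix: the old code turned _audioOutput->bytesFree() directly into a sample budget, which could amount to a fraction of a network frame, while the new code first rounds the available room down to whole frames. A self-contained sketch of the same arithmetic with made-up numbers (the 512-sample frame, the 0.5 rate/channel ratio, the free-byte count, and the samples available are all hypothetical stand-ins for the _ringBuffer and _audioOutput state):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // hypothetical values standing in for _ringBuffer / _audioOutput state
        const int frameSamples = 512;                   // _ringBuffer.getNumFrameSamples()
        const float networkOutputToOutputRatio = 0.5f;  // network rate/channels vs. device rate/channels
        const int bytesFree = 5000;                     // _audioOutput->bytesFree()
        const int samplesAvailable = 2000;              // _ringBuffer.samplesAvailable()

        // old behaviour: a raw sample budget, not necessarily a multiple of frameSamples
        int oldBudget = std::min(samplesAvailable,
                                 (int)(bytesFree / (int)sizeof(int16_t) * networkOutputToOutputRatio));

        // new behaviour: round the room in the audio output down to whole frames first
        int framesRoomFor = (int)(bytesFree / (int)sizeof(int16_t) * networkOutputToOutputRatio) / frameSamples;
        int newBudget = std::min(samplesAvailable, framesRoomFor * frameSamples);

        printf("old: %d samples, new: %d samples (%d whole frames)\n",
               oldBudget, newBudget, newBudget / frameSamples);
        // prints: old: 1250 samples, new: 1024 samples (2 whole frames)
        return 0;
    }

With these numbers the old path would push 1250 samples (about 2.4 frames), while the new path pushes exactly 2 frames (1024 samples) and leaves the remainder in the ring buffer for the next pass.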
@@ -1596,7 +1600,7 @@ const float Audio::CALLBACK_ACCELERATOR_RATIO = 2.0f;
 const float Audio::CALLBACK_ACCELERATOR_RATIO = 2.0f;
 #endif

-int Audio::calculateNumberOfInputCallbackBytes(const QAudioFormat& format) {
+int Audio::calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const {
     int numInputCallbackBytes = (int)(((NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL
         * format.channelCount()
         * (format.sampleRate() / SAMPLE_RATE))
@@ -1605,7 +1609,7 @@ int Audio::calculateNumberOfInputCallbackBytes(const QAudioFormat& format) {
     return numInputCallbackBytes;
 }

-float Audio::calculateDeviceToNetworkInputRatio(int numBytes) {
+float Audio::calculateDeviceToNetworkInputRatio(int numBytes) const {
     float inputToNetworkInputRatio = (int)((_numInputCallbackBytes
         * CALLBACK_ACCELERATOR_RATIO
         / NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL) + 0.5f);
@@ -1613,15 +1617,20 @@ float Audio::calculateDeviceToNetworkInputRatio(int numBytes) {
     return inputToNetworkInputRatio;
 }

-int Audio::calculateNumberOfFrameSamples(int numBytes) {
+int Audio::calculateNumberOfFrameSamples(int numBytes) const {
     int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / sizeof(int16_t);
     return frameSamples;
 }

-int Audio::getFramesAvailableInAudioOutputBuffer() const {
+int Audio::getOutputRingBufferFramesAvailable() const {
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
         * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());

     return (_audioOutput->bufferSize() - _audioOutput->bytesFree()) * networkOutputToOutputRatio
         / (sizeof(int16_t) * _ringBuffer.getNumFrameSamples());
 }
+
+int Audio::getInputRingBufferFramesAvailable() const {
+    float inputToNetworkInputRatio = calculateDeviceToNetworkInputRatio(_numInputCallbackBytes);
+    return _inputRingBuffer.samplesAvailable() / inputToNetworkInputRatio / _inputRingBuffer.getNumFrameSamples();
+}
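The two helpers at the end of this hunk express device-side buffer occupancy in network-frame units, so the overlay can show input, output, and jitter-buffer numbers on the same scale. A rough standalone sketch of the output-side conversion, using hypothetical format values in place of _desiredOutputFormat, _outputFormat, and the QAudioOutput byte counts (the input-side helper applies the same idea in reverse via calculateDeviceToNetworkInputRatio()):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // hypothetical formats; the real values come from _desiredOutputFormat / _outputFormat
        const float networkSampleRate = 24000.0f, deviceSampleRate = 48000.0f;
        const float networkChannels = 2.0f, deviceChannels = 2.0f;
        const int frameSamples = 512;                 // _ringBuffer.getNumFrameSamples()

        // same ratio the diff computes: network rate/channels over device rate/channels
        float networkOutputToOutputRatio = (networkSampleRate / deviceSampleRate)
            * (networkChannels / deviceChannels);     // 0.5 here

        // hypothetical QAudioOutput state: bufferSize() - bytesFree() = bytes already queued
        int bytesQueued = 16384;

        // bytes queued on the device side, expressed as whole network frames
        int networkFramesQueued = (int)(bytesQueued * networkOutputToOutputRatio
            / (sizeof(int16_t) * frameSamples));
        printf("%d network frames queued in the audio output\n", networkFramesQueued);
        // prints: 8 network frames queued in the audio output
        return 0;
    }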
@@ -79,8 +79,11 @@ public:

     const SequenceNumberStats& getIncomingMixedAudioSequenceNumberStats() const { return _incomingMixedAudioSequenceNumberStats; }

-    int getFramesAvailableInAudioOutputBuffer() const;
-    int getAverageFramesAvailableInAudioOutputBuffer() const { return (int)_audioOutputBufferFramesAvailableStats.getWindowAverage(); }
+    int getInputRingBufferFramesAvailable() const;
+    int getInputRingBufferAverageFramesAvailable() const { return (int)_inputRingBufferFramesAvailableStats.getWindowAverage(); }
+
+    int getOutputRingBufferFramesAvailable() const;
+    int getOutputRingBufferAverageFramesAvailable() const { return (int)_audioOutputBufferFramesAvailableStats.getWindowAverage(); }

 public slots:
     void start();
@@ -224,9 +227,9 @@ private:

     // Callback acceleration dependent calculations
     static const float CALLBACK_ACCELERATOR_RATIO;
-    int calculateNumberOfInputCallbackBytes(const QAudioFormat& format);
-    int calculateNumberOfFrameSamples(int numBytes);
-    float calculateDeviceToNetworkInputRatio(int numBytes);
+    int calculateNumberOfInputCallbackBytes(const QAudioFormat& format) const;
+    int calculateNumberOfFrameSamples(int numBytes) const;
+    float calculateDeviceToNetworkInputRatio(int numBytes) const;

     // Audio scope methods for allocation/deallocation
     void allocateScope();
@@ -269,7 +272,10 @@ private:
     SequenceNumberStats _incomingMixedAudioSequenceNumberStats;

     MovingMinMaxAvg<quint64> _interframeTimeGapStats;
-    MovingMinMaxAvg<int> _ringBufferFramesAvailableStats;
+
+    MovingMinMaxAvg<int> _inputRingBufferFramesAvailableStats;
+
+    MovingMinMaxAvg<int> _outputRingBufferFramesAvailableStats;
     MovingMinMaxAvg<int> _audioOutputBufferFramesAvailableStats;
 };
@@ -280,7 +280,7 @@ void Stats::display(
     Audio* audio = Application::getInstance()->getAudio();
     const QHash<QUuid, AudioStreamStats>& audioMixerInjectedStreamAudioStatsMap = audio->getAudioMixerInjectedStreamAudioStatsMap();

-    lines = _expanded ? 11 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
+    lines = _expanded ? 13 + (audioMixerInjectedStreamAudioStatsMap.size() + 2) * 3 : 3;
     drawBackground(backgroundColor, horizontalOffset, 0, _pingStatsWidth, lines * STATS_PELS_PER_LINE + 10);
     horizontalOffset += 5;

@@ -314,6 +314,18 @@ void Stats::display(
     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, voxelMaxPing, color);

+    char inputAudioLabelString[] = "Input: avail_avg_10s/avail";
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioLabelString, color);
+
+    char inputAudioStatsString[512];
+    sprintf(inputAudioStatsString, " %d/%d", audio->getInputRingBufferAverageFramesAvailable(),
+        audio->getInputRingBufferFramesAvailable());
+
+    verticalOffset += STATS_PELS_PER_LINE;
+    drawText(horizontalOffset, verticalOffset, scale, rotation, font, inputAudioStatsString, color);
+
     char audioMixerStatsLabelString[] = "AudioMixer stats:";
     char streamStatsFormatLabelString[] = "lost%/lost_30s%";
     char streamStatsFormatLabelString2[] = "desired/avail_avg_10s/avail";
@@ -342,8 +354,8 @@ void Stats::display(
     sprintf(downstreamAudioStatsString, " mix: %.2f%%/%.2f%%, %u/%u+%d/%u+%d", downstreamAudioStreamStats._packetStreamStats.getLostRate()*100.0f,
         downstreamAudioStreamStats._packetStreamWindowStats.getLostRate() * 100.0f,
         downstreamAudioStreamStats._ringBufferDesiredJitterBufferFrames, downstreamAudioStreamStats._ringBufferFramesAvailableAverage,
-        audio->getAverageFramesAvailableInAudioOutputBuffer(),
-        downstreamAudioStreamStats._ringBufferFramesAvailable, audio->getFramesAvailableInAudioOutputBuffer());
+        audio->getOutputRingBufferAverageFramesAvailable(),
+        downstreamAudioStreamStats._ringBufferFramesAvailable, audio->getOutputRingBufferFramesAvailable());

     verticalOffset += STATS_PELS_PER_LINE;
     drawText(horizontalOffset, verticalOffset, scale, rotation, font, downstreamAudioStatsString, color);
@@ -30,9 +30,9 @@ const int TIME_GAPS_FOR_STATS_PACKET_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
 const int TIME_GAPS_FOR_STATS_PACKET_WINDOW_INTERVALS = 30;

 // the stats for calculating the average frames available will recalculate every ~1 second
-// and will include data for the past ~2 seconds
+// and will include data for the past ~10 seconds
 const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
-const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 2;
+const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10;

 const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
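The final hunk widens the frames-available averaging window from ~2 seconds to ~10 seconds. Because FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES is defined as one second's worth of send intervals, the window length in seconds is simply the number of intervals. A small sanity-check sketch; the 20 ms send interval is an assumed example value, not taken from this diff:

    #include <cstdio>

    int main() {
        // assumed timing: a 20 ms audio frame send interval (the real value is
        // derived from the network frame length and sample rate elsewhere in the codebase)
        const int USECS_PER_SECOND = 1000000;
        const int BUFFER_SEND_INTERVAL_USECS = 20000;

        // one stats interval spans roughly one second's worth of frame samples
        const int FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES = USECS_PER_SECOND / BUFFER_SEND_INTERVAL_USECS;
        const int FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS = 10;   // was 2 before this commit

        printf("interval = %d samples (~1 s), window = ~%d s of data\n",
               FRAMES_AVAILABLE_STATS_INTERVAL_SAMPLES, FRAMES_AVAILABLE_STATS_WINDOW_INTERVALS);
        // prints: interval = 50 samples (~1 s), window = ~10 s of data
        return 0;
    }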