Merge branch 'master' of https://github.com/highfidelity/hifi into improvedPreview

Commit 427f6b100f - 13 changed files with 229 additions and 209 deletions
@@ -270,6 +270,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
     downstreamStats["desired"] = streamStats._desiredJitterBufferFrames;
     downstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
     downstreamStats["available"] = (double) streamStats._framesAvailable;
+    downstreamStats["unplayed"] = (double) streamStats._unplayedMs;
     downstreamStats["starves"] = (double) streamStats._starveCount;
     downstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
     downstreamStats["overflows"] = (double) streamStats._overflowCount;

@@ -294,6 +295,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
     upstreamStats["desired_calc"] = avatarAudioStream->getCalculatedJitterBufferFrames();
     upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
     upstreamStats["available"] = (double) streamStats._framesAvailable;
+    upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
     upstreamStats["starves"] = (double) streamStats._starveCount;
     upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
     upstreamStats["overflows"] = (double) streamStats._overflowCount;

@@ -323,6 +325,7 @@ QJsonObject AudioMixerClientData::getAudioStreamStats() {
     upstreamStats["desired_calc"] = injectorPair.second->getCalculatedJitterBufferFrames();
     upstreamStats["available_avg_10s"] = streamStats._framesAvailableAverage;
     upstreamStats["available"] = (double) streamStats._framesAvailable;
+    upstreamStats["unplayed"] = (double) streamStats._unplayedMs;
     upstreamStats["starves"] = (double) streamStats._starveCount;
     upstreamStats["not_mixed"] = (double) streamStats._consecutiveNotMixedCount;
     upstreamStats["overflows"] = (double) streamStats._overflowCount;
@@ -58,21 +58,19 @@ void AudioStatsDisplay::updatedDisplay(QString str) {
 AudioStatsDialog::AudioStatsDialog(QWidget* parent) :
     QDialog(parent, Qt::Window | Qt::WindowCloseButtonHint | Qt::WindowStaysOnTopHint) {
 
-    _shouldShowInjectedStreams = false;
-
     setWindowTitle("Audio Network Statistics");
 
     // Get statistics from the Audio Client
     _stats = &DependencyManager::get<AudioClient>()->getStats();
 
     // Create layout
     _form = new QFormLayout();
     _form->setSizeConstraint(QLayout::SetFixedSize);
-    QDialog::setLayout(_form);
 
-    // Load and initialize all channels
-    renderStats();
+    // Initialize channels' content (needed to correctly size channels)
+    updateStats();
 
+    // Create channels
     _audioDisplayChannels = QVector<QVector<AudioStatsDisplay*>>(1);
 
     _audioMixerID = addChannel(_form, _audioMixerStats, COLOR0);

@@ -80,9 +78,16 @@ AudioStatsDialog::AudioStatsDialog(QWidget* parent) :
     _upstreamMixerID = addChannel(_form, _upstreamMixerStats, COLOR2);
     _downstreamID = addChannel(_form, _downstreamStats, COLOR3);
     _upstreamInjectedID = addChannel(_form, _upstreamInjectedStats, COLOR0);
 
-    connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(updateTimerTimeout()));
-    averageUpdateTimer->start(1000);
+    // Initialize channels
+    updateChannels();
+
+    // Future renders
+    connect(averageUpdateTimer, SIGNAL(timeout()), this, SLOT(renderStats()));
+    averageUpdateTimer->start(200);
+
+    // Initial render
+    QDialog::setLayout(_form);
 }
 
 int AudioStatsDialog::addChannel(QFormLayout* form, QVector<QString>& stats, const unsigned color) {
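A side note on the rewiring above: the dialog now refreshes itself from the timer slot renderStats() every 200 ms, i.e. five times per second, which is the cadence the new stats windows in AudioIOStats.cpp further down are sized against. A minimal, self-contained sketch of that Qt wiring (the helper name is illustrative, not from this commit):

#include <QDialog>
#include <QTimer>
#include <functional>

// Hypothetical helper: wire a refresh callback to a repeating 200 ms timer,
// giving the same 5 Hz update rate as the dialog above.
void wireStatsRefresh(QDialog* dialog, std::function<void()> renderStats) {
    auto* timer = new QTimer(dialog); // parented to the dialog, freed with it
    QObject::connect(timer, &QTimer::timeout, dialog, std::move(renderStats));
    timer->start(200); // 200 ms period -> renderStats runs 5x per second
}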
@@ -99,148 +104,152 @@ int AudioStatsDialog::addChannel(QFormLayout* form, QVector<QString>& stats, con
     return channelID;
 }
 
-void AudioStatsDialog::updateStats(QVector<QString>& stats, int channelID) {
+void AudioStatsDialog::renderStats() {
+    updateStats();
+    updateChannels();
+}
+
+void AudioStatsDialog::updateChannels() {
+    updateChannel(_audioMixerStats, _audioMixerID);
+    updateChannel(_upstreamClientStats, _upstreamClientID);
+    updateChannel(_upstreamMixerStats, _upstreamMixerID);
+    updateChannel(_downstreamStats, _downstreamID);
+    updateChannel(_upstreamInjectedStats, _upstreamInjectedID);
+}
+
+void AudioStatsDialog::updateChannel(QVector<QString>& stats, int channelID) {
     // Update all stat displays at specified channel
     for (int i = 0; i < stats.size(); i++)
         _audioDisplayChannels[channelID].at(i)->updatedDisplay(stats.at(i));
 }
 
-void AudioStatsDialog::renderStats() {
+void AudioStatsDialog::updateStats() {
 
     // Clear current stats from all vectors
     clearAllChannels();
 
-    double audioInputBufferLatency = 0.0,
-           inputRingBufferLatency = 0.0,
-           networkRoundtripLatency = 0.0,
-           mixerRingBufferLatency = 0.0,
-           outputRingBufferLatency = 0.0,
-           audioOutputBufferLatency = 0.0;
+    double audioInputBufferLatency{ 0.0 };
+    double inputRingBufferLatency{ 0.0 };
+    double networkRoundtripLatency{ 0.0 };
+    double mixerRingBufferLatency{ 0.0 };
+    double outputRingBufferLatency{ 0.0 };
+    double audioOutputBufferLatency{ 0.0 };
 
-    AudioStreamStats downstreamAudioStreamStats = _stats->getMixerDownstreamStats();
-    SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer);
-
-    if (!audioMixerNodePointer.isNull()) {
-        audioInputBufferLatency = (double)_stats->getAudioInputMsecsReadStats().getWindowAverage();
-        inputRingBufferLatency = (double)_stats->getInputRungBufferMsecsAvailableStats().getWindowAverage();
-        networkRoundtripLatency = (double) audioMixerNodePointer->getPingMs();
-        mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._framesAvailableAverage *
-            (double)AudioConstants::NETWORK_FRAME_MSECS;
-        outputRingBufferLatency = (double)downstreamAudioStreamStats._framesAvailableAverage *
-            (double)AudioConstants::NETWORK_FRAME_MSECS;
-        audioOutputBufferLatency = (double)_stats->getAudioOutputMsecsUnplayedStats().getWindowAverage();
+    if (SharedNodePointer audioMixerNodePointer = DependencyManager::get<NodeList>()->soloNodeOfType(NodeType::AudioMixer)) {
+        audioInputBufferLatency = (double)_stats->getInputMsRead().getWindowMax();
+        inputRingBufferLatency = (double)_stats->getInputMsUnplayed().getWindowMax();
+        networkRoundtripLatency = (double)audioMixerNodePointer->getPingMs();
+        mixerRingBufferLatency = (double)_stats->getMixerAvatarStreamStats()._unplayedMs;
+        outputRingBufferLatency = (double)_stats->getMixerDownstreamStats()._unplayedMs;
+        audioOutputBufferLatency = (double)_stats->getOutputMsUnplayed().getWindowMax();
     }
 
-    double totalLatency = audioInputBufferLatency + inputRingBufferLatency + networkRoundtripLatency + mixerRingBufferLatency
-        + outputRingBufferLatency + audioOutputBufferLatency;
+    double totalLatency = audioInputBufferLatency + inputRingBufferLatency + mixerRingBufferLatency
+        + outputRingBufferLatency + audioOutputBufferLatency + networkRoundtripLatency;
 
-    QString stats = "Audio input buffer: %1ms - avg msecs of samples read to the audio input buffer in last 10s";
-    _audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 2)));
-
-    stats = "Input ring buffer: %1ms - avg msecs of samples read to the input ring buffer in last 10s";
-    _audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 2)));
-    stats = "Network to mixer: %1ms - half of last ping value calculated by the node list";
-    _audioMixerStats.push_back(stats.arg(QString::number((networkRoundtripLatency / 2.0), 'f', 2)));
-    stats = "Network to client: %1ms - half of last ping value calculated by the node list";
-    _audioMixerStats.push_back(stats.arg(QString::number((mixerRingBufferLatency / 2.0),'f', 2)));
-    stats = "Output ring buffer: %1ms - avg msecs of samples in output ring buffer in last 10s";
-    _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency,'f', 2)));
-    stats = "Audio output buffer: %1ms - avg msecs of samples in audio output buffer in last 10s";
-    _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency,'f', 2)));
-    stats = "TOTAL: %1ms - avg msecs of samples in audio output buffer in last 10s";
-    _audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 2)));
+    QString stats;
+    _audioMixerStats.push_back("PIPELINE (averaged over the past 10s)");
+    stats = "Input Read:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(audioInputBufferLatency, 'f', 0)));
+    stats = "Input Ring:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(inputRingBufferLatency, 'f', 0)));
+    stats = "Network (client->mixer):\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
+    stats = "Mixer Ring:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(mixerRingBufferLatency, 'f', 0)));
+    stats = "Network (mixer->client):\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(networkRoundtripLatency / 2, 'f', 0)));
+    stats = "Output Ring:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(outputRingBufferLatency, 'f', 0)));
+    stats = "Output Read:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(audioOutputBufferLatency, 'f', 0)));
+    stats = "TOTAL:\t%1 ms";
+    _audioMixerStats.push_back(stats.arg(QString::number(totalLatency, 'f', 0)));
 
-    const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketSentTimeGaps();
+    const MovingMinMaxAvg<quint64>& packetSentTimeGaps = _stats->getPacketTimegaps();
 
     _upstreamClientStats.push_back("\nUpstream Mic Audio Packets Sent Gaps (by client):");
 
-    stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
+    stats = "Inter-packet timegaps";
+    _upstreamClientStats.push_back(stats);
+    stats = "overall min:\t%1, max:\t%2, avg:\t%3";
     stats = stats.arg(formatUsecTime(packetSentTimeGaps.getMin()),
                       formatUsecTime(packetSentTimeGaps.getMax()),
                       formatUsecTime(packetSentTimeGaps.getAverage()));
     _upstreamClientStats.push_back(stats);
 
-    stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
+    stats = "last window min:\t%1, max:\t%2, avg:\t%3";
     stats = stats.arg(formatUsecTime(packetSentTimeGaps.getWindowMin()),
                       formatUsecTime(packetSentTimeGaps.getWindowMax()),
                       formatUsecTime(packetSentTimeGaps.getWindowAverage()));
     _upstreamClientStats.push_back(stats);
 
-    _upstreamMixerStats.push_back("\nUpstream mic audio stats (received and reported by audio-mixer):");
+    _upstreamMixerStats.push_back("\nMIXER STREAM");
+    _upstreamMixerStats.push_back("(this client's remote mixer stream performance)");
 
-    renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats, true);
+    renderAudioStreamStats(&_stats->getMixerAvatarStreamStats(), &_upstreamMixerStats);
 
-    _downstreamStats.push_back("\nDownstream mixed audio stats:");
+    _downstreamStats.push_back("\nCLIENT STREAM");
 
     AudioStreamStats downstreamStats = _stats->getMixerDownstreamStats();
 
-    renderAudioStreamStats(&downstreamStats, &_downstreamStats, true);
+    renderAudioStreamStats(&downstreamStats, &_downstreamStats);
 
     if (_shouldShowInjectedStreams) {
         foreach(const AudioStreamStats& injectedStreamAudioStats, _stats->getMixerInjectedStreamStatsMap()) {
-            stats = "\nUpstream injected audio stats: stream ID: %1";
+            stats = "\nINJECTED STREAM (ID: %1)";
             stats = stats.arg(injectedStreamAudioStats._streamIdentifier.toString());
             _upstreamInjectedStats.push_back(stats);
 
-            renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats, true);
+            renderAudioStreamStats(&injectedStreamAudioStats, &_upstreamInjectedStats);
         }
     }
 }
 
-void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats, bool isDownstreamStats) {
+void AudioStatsDialog::renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamStats) {
 
-    QString stats = "Packet loss | overall: %1% (%2 lost), last_30s: %3% (%4 lost)";
+    QString stats = "Packet Loss";
+    audioStreamStats->push_back(stats);
+    stats = "overall:\t%1%\t(%2 lost), window:\t%3%\t(%4 lost)";
     stats = stats.arg(QString::number((int)(streamStats->_packetStreamStats.getLostRate() * 100.0f)),
                       QString::number((int)(streamStats->_packetStreamStats._lost)),
                       QString::number((int)(streamStats->_packetStreamWindowStats.getLostRate() * 100.0f)),
                       QString::number((int)(streamStats->_packetStreamWindowStats._lost)));
     audioStreamStats->push_back(stats);
 
-    if (isDownstreamStats) {
-        stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2 + %3, available: %4 + %5";
-        stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
-                          QString::number(streamStats->_framesAvailableAverage),
-                          QString::number((int)((float)_stats->getAudioInputMsecsReadStats().getWindowAverage() /
-                                                AudioConstants::NETWORK_FRAME_MSECS)),
-                          QString::number(streamStats->_framesAvailable),
-                          QString::number((int)(_stats->getAudioOutputMsecsUnplayedStats().getCurrentIntervalLastSample() /
-                                                AudioConstants::NETWORK_FRAME_MSECS)));
-        audioStreamStats->push_back(stats);
-    } else {
-        stats = "Ringbuffer frames | desired: %1, avg_available(10s): %2, available: %3";
-        stats = stats.arg(QString::number(streamStats->_desiredJitterBufferFrames),
-                          QString::number(streamStats->_framesAvailableAverage),
-                          QString::number(streamStats->_framesAvailable));
-        audioStreamStats->push_back(stats);
-    }
-
-    stats = "Ringbuffer stats | starves: %1, prev_starve_lasted: %2, frames_dropped: %3, overflows: %4";
+    stats = "Ringbuffer";
+    audioStreamStats->push_back(stats);
+    stats = "available frames (avg):\t%1\t(%2), desired:\t%3";
+    stats = stats.arg(QString::number(streamStats->_framesAvailable),
+                      QString::number(streamStats->_framesAvailableAverage),
+                      QString::number(streamStats->_desiredJitterBufferFrames));
+    audioStreamStats->push_back(stats);
+    stats = "starves:\t%1, last starve duration:\t%2, drops:\t%3, overflows:\t%4";
     stats = stats.arg(QString::number(streamStats->_starveCount),
                       QString::number(streamStats->_consecutiveNotMixedCount),
                       QString::number(streamStats->_framesDropped),
                       QString::number(streamStats->_overflowCount));
     audioStreamStats->push_back(stats);
 
-    stats = "Inter-packet timegaps (overall) | min: %1, max: %2, avg: %3";
+    stats = "Inter-packet timegaps";
+    audioStreamStats->push_back(stats);
+    stats = "overall min:\t%1, max:\t%2, avg:\t%3";
     stats = stats.arg(formatUsecTime(streamStats->_timeGapMin),
                       formatUsecTime(streamStats->_timeGapMax),
                       formatUsecTime(streamStats->_timeGapAverage));
     audioStreamStats->push_back(stats);
 
-    stats = "Inter-packet timegaps (last 30s) | min: %1, max: %2, avg: %3";
+    stats = "last window min:\t%1, max:\t%2, avg:\t%3";
     stats = stats.arg(formatUsecTime(streamStats->_timeGapWindowMin),
                       formatUsecTime(streamStats->_timeGapWindowMax),
                       formatUsecTime(streamStats->_timeGapWindowAverage));
     audioStreamStats->push_back(stats);
 }
 
 void AudioStatsDialog::clearAllChannels() {
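For reference, the TOTAL row produced by updateStats() above is a plain sum of the six per-stage estimates; only the display splits the ping into two half-round-trip rows. A worked example with made-up numbers:

// Illustrative values only - the real numbers come from the stats getters above.
double inputRead = 12.0, inputRing = 3.0, mixerRing = 10.0,
       outputRing = 20.0, outputRead = 15.0, roundtripPing = 40.0;

// The two "Network" rows each show roundtripPing / 2 (20 ms up, 20 ms down),
// but TOTAL adds the full round trip exactly once:
double total = inputRead + inputRing + mixerRing
             + outputRing + outputRead + roundtripPing; // == 100.0 ms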
@@ -251,21 +260,6 @@ void AudioStatsDialog::clearAllChannels() {
     _upstreamInjectedStats.clear();
 }
 
-void AudioStatsDialog::updateTimerTimeout() {
-
-    renderStats();
-
-    // Update all audio stats
-    updateStats(_audioMixerStats, _audioMixerID);
-    updateStats(_upstreamClientStats, _upstreamClientID);
-    updateStats(_upstreamMixerStats, _upstreamMixerID);
-    updateStats(_downstreamStats, _downstreamID);
-    updateStats(_upstreamInjectedStats, _upstreamInjectedID);
-
-}
-
 void AudioStatsDialog::paintEvent(QPaintEvent* event) {
 
     // Repaint each stat in each channel
@@ -70,18 +70,18 @@ private:
     QVector<QVector<AudioStatsDisplay*>> _audioDisplayChannels;
 
+    void updateStats();
     int addChannel(QFormLayout* form, QVector<QString>& stats, const unsigned color);
-    void updateStats(QVector<QString>& stats, const int channelID);
-    void renderStats();
+    void updateChannel(QVector<QString>& stats, const int channelID);
+    void updateChannels();
     void clearAllChannels();
-    void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats, bool isDownstreamStats);
+    void renderAudioStreamStats(const AudioStreamStats* streamStats, QVector<QString>* audioStreamstats);
 
     const AudioIOStats* _stats;
     QFormLayout* _form;
 
-    bool _isEnabled;
-    bool _shouldShowInjectedStreams;
+    bool _shouldShowInjectedStreams{ false };
 
 signals:

@@ -93,7 +93,7 @@ signals:
 
     void reject() override;
-    void updateTimerTimeout();
+    void renderStats();
 
 protected:
@@ -47,6 +47,7 @@
 #include "PositionalAudioStream.h"
 #include "AudioClientLogging.h"
+#include "AudioLogging.h"
 
 #include "AudioClient.h"

@@ -122,12 +123,11 @@ AudioClient::AudioClient() :
     _outputBufferSizeFrames("audioOutputBufferSizeFrames", DEFAULT_AUDIO_OUTPUT_BUFFER_SIZE_FRAMES),
     _sessionOutputBufferSizeFrames(_outputBufferSizeFrames.get()),
     _outputStarveDetectionEnabled("audioOutputBufferStarveDetectionEnabled",
                                   DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_ENABLED),
     _outputStarveDetectionPeriodMsec("audioOutputStarveDetectionPeriod",
                                      DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_PERIOD),
     _outputStarveDetectionThreshold("audioOutputStarveDetectionThreshold",
                                     DEFAULT_AUDIO_OUTPUT_STARVE_DETECTION_THRESHOLD),
-    _averagedLatency(0.0f),
     _lastInputLoudness(0.0f),
     _timeSinceLastClip(-1.0f),
     _muted(false),
@@ -441,7 +441,7 @@ void possibleResampling(AudioSRC* resampler,
         if (!sampleChannelConversion(sourceSamples, destinationSamples, numSourceSamples,
                                      sourceAudioFormat, destinationAudioFormat)) {
             // no conversion, we can copy the samples directly across
-            memcpy(destinationSamples, sourceSamples, numSourceSamples * sizeof(int16_t));
+            memcpy(destinationSamples, sourceSamples, numSourceSamples * AudioConstants::SAMPLE_SIZE);
         }
     } else {

@@ -815,10 +815,10 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
 
     static QByteArray loopBackByteArray;
 
-    int numInputSamples = inputByteArray.size() / sizeof(int16_t);
+    int numInputSamples = inputByteArray.size() / AudioConstants::SAMPLE_SIZE;
     int numLoopbackSamples = numDestinationSamplesRequired(_inputFormat, _outputFormat, numInputSamples);
 
-    loopBackByteArray.resize(numLoopbackSamples * sizeof(int16_t));
+    loopBackByteArray.resize(numLoopbackSamples * AudioConstants::SAMPLE_SIZE);
 
     int16_t* inputSamples = reinterpret_cast<int16_t*>(inputByteArray.data());
     int16_t* loopbackSamples = reinterpret_cast<int16_t*>(loopBackByteArray.data());

@@ -826,7 +826,7 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
     // upmix mono to stereo
     if (!sampleChannelConversion(inputSamples, loopbackSamples, numInputSamples, _inputFormat, _outputFormat)) {
         // no conversion, just copy the samples
-        memcpy(loopbackSamples, inputSamples, numInputSamples * sizeof(int16_t));
+        memcpy(loopbackSamples, inputSamples, numInputSamples * AudioConstants::SAMPLE_SIZE);
     }
 
     // apply stereo reverb at the source, to the loopback audio

@@ -853,7 +853,7 @@ void AudioClient::handleAudioInput() {
     _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());
 
     float audioInputMsecsRead = inputByteArray.size() / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
-    _stats.updateInputMsecsRead(audioInputMsecsRead);
+    _stats.updateInputMsRead(audioInputMsecsRead);
 
     const int numNetworkBytes = _isStereoInput
         ? AudioConstants::NETWORK_FRAME_BYTES_STEREO

@@ -941,6 +941,10 @@ void AudioClient::handleAudioInput() {
 
         emitAudioPacket(encodedBuffer.constData(), encodedBuffer.size(), _outgoingAvatarAudioSequenceNumber, audioTransform, packetType, _selectedCodecName);
         _stats.sentPacket();
+
+        int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * AudioConstants::SAMPLE_SIZE;
+        float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
+        _stats.updateInputMsUnplayed(msecsInInputRingBuffer);
     }
 }
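The three added lines above convert the input ring buffer's backlog from bytes to milliseconds via QAudioFormat::bytesForDuration(), which takes a duration in microseconds. A self-contained sketch of the same conversion (the constant is redefined locally for illustration):

#include <QAudioFormat>

float msecsBuffered(const QAudioFormat& format, int bytesInBuffer) {
    const qint64 USECS_PER_MSEC = 1000; // bytesForDuration() wants microseconds
    float bytesPerMsec = (float)format.bytesForDuration(USECS_PER_MSEC);
    return bytesInBuffer / bytesPerMsec; // e.g. 960 bytes at 96 bytes/ms -> 10 ms
}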
@@ -1021,7 +1025,7 @@ void AudioClient::processReceivedSamples(const QByteArray& decodedBuffer, QByteA
     const int16_t* decodedSamples = reinterpret_cast<const int16_t*>(decodedBuffer.data());
     assert(decodedBuffer.size() == AudioConstants::NETWORK_FRAME_BYTES_STEREO);
 
-    outputBuffer.resize(_outputFrameSize * sizeof(int16_t));
+    outputBuffer.resize(_outputFrameSize * AudioConstants::SAMPLE_SIZE);
     int16_t* outputSamples = reinterpret_cast<int16_t*>(outputBuffer.data());
 
     // convert network audio to float

@@ -1280,7 +1284,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
     // setup our general output device for audio-mixer audio
     _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
     int osDefaultBufferSize = _audioOutput->bufferSize();
-    int requestedSize = _sessionOutputBufferSizeFrames *_outputFrameSize * sizeof(int16_t);
+    int requestedSize = _sessionOutputBufferSizeFrames *_outputFrameSize * AudioConstants::SAMPLE_SIZE;
     _audioOutput->setBufferSize(requestedSize);
 
     connect(_audioOutput, &QAudioOutput::notify, this, &AudioClient::outputNotify);

@@ -1292,7 +1296,7 @@ bool AudioClient::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDevice
     _audioOutput->start(&_audioOutputIODevice);
     lock.unlock();
 
-    qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize <<
+    qCDebug(audioclient) << "Output Buffer capacity in frames: " << _audioOutput->bufferSize() / AudioConstants::SAMPLE_SIZE / (float)_outputFrameSize <<
         "requested bytes:" << requestedSize << "actual bytes:" << _audioOutput->bufferSize() <<
         "os default:" << osDefaultBufferSize << "period size:" << _audioOutput->periodSize();
@@ -1354,26 +1358,10 @@ int AudioClient::calculateNumberOfInputCallbackBytes(const QAudioFormat& format)
 }
 
 int AudioClient::calculateNumberOfFrameSamples(int numBytes) const {
-    int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / sizeof(int16_t);
+    int frameSamples = (int)(numBytes * CALLBACK_ACCELERATOR_RATIO + 0.5f) / AudioConstants::SAMPLE_SIZE;
     return frameSamples;
 }
 
-float AudioClient::getInputRingBufferMsecsAvailable() const {
-    int bytesInInputRingBuffer = _inputRingBuffer.samplesAvailable() * sizeof(int16_t);
-    float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
-    return msecsInInputRingBuffer;
-}
-
-float AudioClient::getAudioOutputMsecsUnplayed() const {
-    if (!_audioOutput) {
-        return 0.0f;
-    }
-    int bytesAudioOutputUnplayed = _audioOutput->bufferSize() - _audioOutput->bytesFree();
-    float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_outputFormat.bytesForDuration(USECS_PER_MSEC);
-    return msecsAudioOutputUnplayed;
-}
-
 float AudioClient::azimuthForSource(const glm::vec3& relativePosition) {
     // copied from AudioMixer, more or less
     glm::quat inverseOrientation = glm::inverse(_orientationGetter());

@@ -1414,14 +1402,15 @@ float AudioClient::gainForSource(float distance, float volume) {
 }
 
 qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
-    auto samplesRequested = maxSize / sizeof(int16_t);
+    auto samplesRequested = maxSize / AudioConstants::SAMPLE_SIZE;
     int samplesPopped;
     int bytesWritten;
 
     if ((samplesPopped = _receivedAudioStream.popSamples((int)samplesRequested, false)) > 0) {
+        qCDebug(audiostream, "Read %d samples from buffer (%d available)", samplesPopped, _receivedAudioStream.getSamplesAvailable());
         AudioRingBuffer::ConstIterator lastPopOutput = _receivedAudioStream.getLastPopOutput();
         lastPopOutput.readSamples((int16_t*)data, samplesPopped);
-        bytesWritten = samplesPopped * sizeof(int16_t);
+        bytesWritten = samplesPopped * AudioConstants::SAMPLE_SIZE;
     } else {
         // nothing on network, don't grab anything from injectors, and just return 0s
         // this will flood the log: qCDebug(audioclient, "empty/partial network buffer");

@@ -1429,8 +1418,11 @@ qint64 AudioClient::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
         bytesWritten = maxSize;
     }
 
-    bool wasBufferStarved = _audio->_audioOutput->bufferSize() == _audio->_audioOutput->bytesFree();
-    if (wasBufferStarved) {
+    int bytesAudioOutputUnplayed = _audio->_audioOutput->bufferSize() - _audio->_audioOutput->bytesFree();
+    float msecsAudioOutputUnplayed = bytesAudioOutputUnplayed / (float)_audio->_outputFormat.bytesForDuration(USECS_PER_MSEC);
+    _audio->_stats.updateOutputMsUnplayed(msecsAudioOutputUnplayed);
+
+    if (bytesAudioOutputUnplayed == 0) {
         _unfulfilledReads++;
     }
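The readData() change above replaces a boolean "is the device buffer completely free?" starve test with a measurement: it computes the unplayed byte count, feeds the millisecond equivalent into the stats, and still counts the read as unfulfilled when nothing is queued. The core of that check, as a sketch:

#include <QAudioOutput>

// Unplayed bytes = total device buffer minus the free portion;
// 0 means the sink has drained everything and this read was "unfulfilled".
int unplayedBytes(const QAudioOutput* output) {
    return output->bufferSize() - output->bytesFree();
}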
@@ -121,9 +121,6 @@ public:
 
     const AudioIOStats& getStats() const { return _stats; }
 
-    float getInputRingBufferMsecsAvailable() const;
-    float getAudioOutputMsecsUnplayed() const;
-
     int getOutputBufferSize() { return _outputBufferSizeFrames.get(); }
 
     bool getOutputStarveDetectionEnabled() { return _outputStarveDetectionEnabled.get(); }

@@ -284,7 +281,6 @@ private:
 
     StDev _stdev;
     QElapsedTimer _timeSinceLastReceived;
-    float _averagedLatency;
     float _lastInputLoudness;
     float _timeSinceLastClip;
     int _totalInputAudioSamples;
@@ -18,54 +18,73 @@
 
 #include "AudioIOStats.h"
 
-const int FRAMES_AVAILABLE_STATS_WINDOW_SECONDS = 10;
+// This is called 5x/sec (see AudioStatsDialog), and we want it to log the last 5s
+static const int INPUT_READS_WINDOW = 25;
+static const int INPUT_UNPLAYED_WINDOW = 25;
+static const int OUTPUT_UNPLAYED_WINDOW = 25;
 
-const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
+static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.0f / AudioConstants::NETWORK_FRAME_MSECS);
 
 AudioIOStats::AudioIOStats(MixedProcessedAudioStream* receivedAudioStream) :
     _receivedAudioStream(receivedAudioStream),
-    _audioInputMsecsReadStats(MSECS_PER_SECOND / (float)AudioConstants::NETWORK_FRAME_MSECS * AudioClient::CALLBACK_ACCELERATOR_RATIO, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
-    _lastSentAudioPacket(0),
-    _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
+    _inputMsRead(0, INPUT_READS_WINDOW),
+    _inputMsUnplayed(0, INPUT_UNPLAYED_WINDOW),
+    _outputMsUnplayed(0, OUTPUT_UNPLAYED_WINDOW),
+    _lastSentPacketTime(0),
+    _packetTimegaps(0, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
 {
 
-}
-
-AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
-    return _receivedAudioStream->getAudioStreamStats();
 }
 
 void AudioIOStats::reset() {
     _receivedAudioStream->resetStats();
 
+    _inputMsRead.reset();
+    _inputMsUnplayed.reset();
+    _outputMsUnplayed.reset();
+    _packetTimegaps.reset();
+
     _mixerAvatarStreamStats = AudioStreamStats();
     _mixerInjectedStreamStatsMap.clear();
-
-    _audioInputMsecsReadStats.reset();
-    _inputRingBufferMsecsAvailableStats.reset();
-
-    _audioOutputMsecsUnplayedStats.reset();
-    _packetSentTimeGaps.reset();
 }
 
 void AudioIOStats::sentPacket() {
     // first time this is 0
-    if (_lastSentAudioPacket == 0) {
-        _lastSentAudioPacket = usecTimestampNow();
+    if (_lastSentPacketTime == 0) {
+        _lastSentPacketTime = usecTimestampNow();
     } else {
         quint64 now = usecTimestampNow();
-        quint64 gap = now - _lastSentAudioPacket;
-        _packetSentTimeGaps.update(gap);
-        _lastSentAudioPacket = now;
+        quint64 gap = now - _lastSentPacketTime;
+        _lastSentPacketTime = now;
+        _packetTimegaps.update(gap);
     }
 }
 
-void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
+const MovingMinMaxAvg<float>& AudioIOStats::getInputMsRead() const {
+    _inputMsRead.currentIntervalComplete();
+    return _inputMsRead;
+}
+
+const MovingMinMaxAvg<float>& AudioIOStats::getInputMsUnplayed() const {
+    _inputMsUnplayed.currentIntervalComplete();
+    return _inputMsUnplayed;
+}
+
+const MovingMinMaxAvg<float>& AudioIOStats::getOutputMsUnplayed() const {
+    _outputMsUnplayed.currentIntervalComplete();
+    return _outputMsUnplayed;
+}
+
+const MovingMinMaxAvg<quint64>& AudioIOStats::getPacketTimegaps() const {
+    _packetTimegaps.currentIntervalComplete();
+    return _packetTimegaps;
+}
+
+const AudioStreamStats AudioIOStats::getMixerDownstreamStats() const {
+    return _receivedAudioStream->getAudioStreamStats();
+}
+
+void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
     // parse the appendFlag, clear injected audio stream stats if 0
     quint8 appendFlag;
     message->readPrimitive(&appendFlag);

@@ -92,14 +111,9 @@ void AudioIOStats::processStreamStatsPacket(QSharedPointer<ReceivedMessage> mess
 }
 
 void AudioIOStats::sendDownstreamAudioStatsPacket() {
 
     auto audioIO = DependencyManager::get<AudioClient>();
 
-    // since this function is called every second, we'll sample for some of our stats here
-    _inputRingBufferMsecsAvailableStats.update(audioIO->getInputRingBufferMsecsAvailable());
-    _audioOutputMsecsUnplayedStats.update(audioIO->getAudioOutputMsecsUnplayed());
-
-    // also, call _receivedAudioStream's per-second callback
+    // call _receivedAudioStream's per-second callback
    _receivedAudioStream->perSecondCallbackForUpdatingStats();
 
     auto nodeList = DependencyManager::get<NodeList>();
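Two patterns are worth noting in the rewritten AudioIOStats above. First, the window sizing: the constructor arguments appear to be (samples per interval, number of intervals), so with the dialog polling five times per second, 25 intervals cover roughly the last 5 seconds, matching the comment. Second, the getters are const but must fold the still-open interval into the window before returning, which is why the members became mutable. A sketch of that pattern, assuming the MovingMinMaxAvg API used above (update / currentIntervalComplete):

// Hypothetical holder class, mirroring the pattern above.
class StatsSketch {
public:
    const MovingMinMaxAvg<float>& inputMsRead() const {
        // Close the in-progress interval so the window reflects
        // everything sampled up to this call.
        _inputMsRead.currentIntervalComplete();
        return _inputMsRead;
    }
    void sample(float ms) { _inputMsRead.update(ms); }
private:
    // (0, 25): intervals are completed manually by the getter; 25 of them,
    // read 5x/sec, span about the last 5 seconds of samples.
    mutable MovingMinMaxAvg<float> _inputMsRead{ 0, 25 };
};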
@@ -29,19 +29,20 @@ public:
 
     void reset();
 
-    void updateInputMsecsRead(float msecsRead) { _audioInputMsecsReadStats.update(msecsRead); }
+    void updateInputMsRead(float ms) { _inputMsRead.update(ms); }
+    void updateInputMsUnplayed(float ms) { _inputMsUnplayed.update(ms); }
+    void updateOutputMsUnplayed(float ms) { _outputMsUnplayed.update(ms); }
     void sentPacket();
 
-    AudioStreamStats getMixerDownstreamStats() const;
+    const MovingMinMaxAvg<float>& getInputMsRead() const;
+    const MovingMinMaxAvg<float>& getInputMsUnplayed() const;
+    const MovingMinMaxAvg<float>& getOutputMsUnplayed() const;
+    const MovingMinMaxAvg<quint64>& getPacketTimegaps() const;
+
+    const AudioStreamStats getMixerDownstreamStats() const;
     const AudioStreamStats& getMixerAvatarStreamStats() const { return _mixerAvatarStreamStats; }
     const QHash<QUuid, AudioStreamStats>& getMixerInjectedStreamStatsMap() const { return _mixerInjectedStreamStatsMap; }
 
-    const MovingMinMaxAvg<float>& getAudioInputMsecsReadStats() const { return _audioInputMsecsReadStats; }
-    const MovingMinMaxAvg<float>& getInputRungBufferMsecsAvailableStats() const { return _inputRingBufferMsecsAvailableStats; }
-    const MovingMinMaxAvg<float>& getAudioOutputMsecsUnplayedStats() const { return _audioOutputMsecsUnplayedStats; }
-
-    const MovingMinMaxAvg<quint64>& getPacketSentTimeGaps() const { return _packetSentTimeGaps; }
-
     void sendDownstreamAudioStatsPacket();
 
 public slots:

@@ -49,17 +50,16 @@ public slots:
 
 private:
     MixedProcessedAudioStream* _receivedAudioStream;
 
-    MovingMinMaxAvg<float> _audioInputMsecsReadStats;
-    MovingMinMaxAvg<float> _inputRingBufferMsecsAvailableStats;
-    MovingMinMaxAvg<float> _audioOutputMsecsUnplayedStats;
+    mutable MovingMinMaxAvg<float> _inputMsRead;
+    mutable MovingMinMaxAvg<float> _inputMsUnplayed;
+    mutable MovingMinMaxAvg<float> _outputMsUnplayed;
+
+    quint64 _lastSentPacketTime;
+    mutable MovingMinMaxAvg<quint64> _packetTimegaps;
 
     AudioStreamStats _mixerAvatarStreamStats;
     QHash<QUuid, AudioStreamStats> _mixerInjectedStreamStatsMap;
-
-    quint64 _lastSentAudioPacket;
-    MovingMinMaxAvg<quint64> _packetSentTimeGaps;
 };
 
 #endif // hifi_AudioIOStats_h
@@ -23,15 +23,16 @@ namespace AudioConstants {
 
     typedef int16_t AudioSample;
+    const int SAMPLE_SIZE = sizeof(AudioSample);
 
     inline const char* getAudioFrameName() { return "com.highfidelity.recording.Audio"; }
 
     const int MAX_CODEC_NAME_LENGTH = 30;
     const int MAX_CODEC_NAME_LENGTH_ON_WIRE = MAX_CODEC_NAME_LENGTH + sizeof(uint32_t);
     const int NETWORK_FRAME_BYTES_STEREO = 960;
-    const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / sizeof(AudioSample);
+    const int NETWORK_FRAME_SAMPLES_STEREO = NETWORK_FRAME_BYTES_STEREO / SAMPLE_SIZE;
     const int NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_BYTES_STEREO / 2;
-    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / sizeof(AudioSample);
+    const int NETWORK_FRAME_SAMPLES_PER_CHANNEL = NETWORK_FRAME_BYTES_PER_CHANNEL / SAMPLE_SIZE;
     const float NETWORK_FRAME_SECS = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL / float(AudioConstants::SAMPLE_RATE));
     const float NETWORK_FRAME_MSECS = NETWORK_FRAME_SECS * 1000.0f;
     const float NETWORK_FRAMES_PER_SEC = 1.0f / NETWORK_FRAME_SECS;
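The new SAMPLE_SIZE constant above is what the many sizeof(int16_t) replacements in AudioClient.cpp resolve to: naming the size of AudioSample once means every byte/sample conversion tracks the sample type if it ever changes. Minimal usage sketch:

#include <cstdint>

namespace AudioConstants {
    typedef int16_t AudioSample;
    const int SAMPLE_SIZE = sizeof(AudioSample); // 2 bytes for int16_t today
}

// Call sites scale by the named constant instead of a hard-coded sizeof(int16_t):
int samplesForBytes(int numBytes) {
    return numBytes / AudioConstants::SAMPLE_SIZE; // 960 bytes -> 480 samples
}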
@@ -14,7 +14,7 @@
 Q_LOGGING_CATEGORY(audio, "hifi.audio")
 
 #if DEV_BUILD || PR_BUILD
-Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtDebugMsg)
+Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg)
 #else
-Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtInfoMsg)
+Q_LOGGING_CATEGORY(audiostream, "hifi.audio-stream", QtWarningMsg)
 #endif
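The third argument to Q_LOGGING_CATEGORY (available since Qt 5.4) sets the category's minimum severity, and Qt orders QtDebugMsg < QtInfoMsg < QtWarningMsg. After this change, dev/PR builds show info and above while other builds show only warnings and above, which is why the chattier qCDebug() calls elsewhere in this commit were promoted to qCInfo(). A sketch:

#include <QLoggingCategory>

// Everything below the declared floor is filtered out for this category.
Q_LOGGING_CATEGORY(demoStream, "demo.audio-stream", QtInfoMsg)

void logDemo() {
    qCDebug(demoStream) << "suppressed (below the QtInfoMsg floor)";
    qCInfo(demoStream) << "shown";
    qCWarning(demoStream) << "shown";
}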
@@ -48,6 +48,7 @@ public:
 
     quint32 _framesAvailable;
     quint16 _framesAvailableAverage;
+    quint16 _unplayedMs;
     quint16 _desiredJitterBufferFrames;
     quint32 _starveCount;
     quint32 _consecutiveNotMixedCount;
@@ -18,7 +18,10 @@
 #include "InboundAudioStream.h"
 #include "AudioLogging.h"
 
-const int STARVE_HISTORY_CAPACITY = 50;
+static const int STARVE_HISTORY_CAPACITY = 50;
+
+// This is called 1x/s, and we want it to log the last 5s
+static const int UNPLAYED_MS_WINDOW_SECS = 5;
 
 InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, const Settings& settings) :
     _ringBuffer(numFrameSamples, numFramesCapacity),

@@ -46,6 +49,7 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit
     _starveHistory(STARVE_HISTORY_CAPACITY),
     _starveThreshold(settings._windowStarveThreshold),
     _framesAvailableStat(),
+    _unplayedMs(0, UNPLAYED_MS_WINDOW_SECS),
     _currentJitterBufferFrames(0),
     _timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
     _repetitionWithFade(settings._repetitionWithFade),

@@ -82,6 +86,7 @@ void InboundAudioStream::resetStats() {
     _framesAvailableStat.reset();
     _currentJitterBufferFrames = 0;
     _timeGapStatsForStatsPacket.reset();
+    _unplayedMs.reset();
 }
 
 void InboundAudioStream::clearBuffer() {

@@ -101,6 +106,7 @@ void InboundAudioStream::perSecondCallbackForUpdatingStats() {
     _timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
     _timeGapStatsForDesiredReduction.currentIntervalComplete();
     _timeGapStatsForStatsPacket.currentIntervalComplete();
+    _unplayedMs.currentIntervalComplete();
 }
 
 int InboundAudioStream::parseData(ReceivedMessage& message) {

@@ -163,6 +169,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
     int framesAvailable = _ringBuffer.framesAvailable();
     // if this stream was starved, check if we're still starved.
     if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
+        qCInfo(audiostream, "Starve ended");
         _isStarved = false;
     }
     // if the ringbuffer exceeds the desired size by more than the threshold specified,

@@ -176,8 +183,8 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
 
         _oldFramesDropped += framesToDrop;
 
-        qCDebug(audiostream, "Dropped %d frames", framesToDrop);
-        qCDebug(audiostream, "Resetted current jitter frames");
+        qCInfo(audiostream, "Dropped %d frames", framesToDrop);
+        qCInfo(audiostream, "Reset current jitter frames");
     }
 
     framesAvailableChanged();

@@ -232,8 +239,8 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
         _currentJitterBufferFrames -= numSilentFramesToDrop;
         _silentFramesDropped += numSilentFramesToDrop;
 
-        qCDebug(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
-        qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
+        qCInfo(audiostream, "Dropped %d silent frames", numSilentFramesToDrop);
+        qCInfo(audiostream, "Set current jitter frames to %d (dropped)", _currentJitterBufferFrames);
 
         _framesAvailableStat.reset();
     }

@@ -302,6 +309,9 @@ int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveI
 }
 
 void InboundAudioStream::popSamplesNoCheck(int samples) {
+    float unplayedMs = (_ringBuffer.samplesAvailable() / (float)_ringBuffer.getNumFrameSamples()) * AudioConstants::NETWORK_FRAME_MSECS;
+    _unplayedMs.update(unplayedMs);
+
     _lastPopOutput = _ringBuffer.nextOutput();
     _ringBuffer.shiftReadPosition(samples);
     framesAvailableChanged();
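The unplayedMs figure fed into the new stat above is just the ring buffer backlog expressed in frames, scaled by the per-frame duration. A worked example with illustrative numbers:

// With 240 samples per network frame and 1200 samples waiting in the ring
// buffer, the backlog is 1200 / 240 = 5 frames, i.e. 5 * NETWORK_FRAME_MSECS
// worth of audio not yet played.
float unplayedMs(int samplesAvailable, int samplesPerFrame, float frameMsecs) {
    return (samplesAvailable / (float)samplesPerFrame) * frameMsecs;
}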
@@ -315,13 +325,17 @@ void InboundAudioStream::framesAvailableChanged() {
 
     if (_framesAvailableStat.getElapsedUsecs() >= FRAMES_AVAILABLE_STAT_WINDOW_USECS) {
         _currentJitterBufferFrames = (int)ceil(_framesAvailableStat.getAverage());
-        qCDebug(audiostream, "Set current jitter frames to %d", _currentJitterBufferFrames);
+        qCInfo(audiostream, "Set current jitter frames to %d (changed)", _currentJitterBufferFrames);
 
         _framesAvailableStat.reset();
     }
 }
 
 void InboundAudioStream::setToStarved() {
+    if (!_isStarved) {
+        qCInfo(audiostream, "Starved");
+    }
+
     _consecutiveNotMixedCount = 0;
     _starveCount++;
     // if we have more than the desired frames when setToStarved() is called, then we'll immediately

@@ -364,7 +378,7 @@ void InboundAudioStream::setToStarved() {
             // make sure _desiredJitterBufferFrames does not become lower here
             if (calculatedJitterBufferFrames >= _desiredJitterBufferFrames) {
                 _desiredJitterBufferFrames = calculatedJitterBufferFrames;
-                qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
+                qCInfo(audiostream, "Set desired jitter frames to %d (starved)", _desiredJitterBufferFrames);
             }
         }
     }

@@ -420,7 +434,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
 
     // update our timegap stats and desired jitter buffer frames if necessary
     // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter
-    const quint32 NUM_INITIAL_PACKETS_DISCARD = 3;
+    const quint32 NUM_INITIAL_PACKETS_DISCARD = 1000; // 10s
     quint64 now = usecTimestampNow();
     if (_incomingSequenceNumberStats.getReceived() > NUM_INITIAL_PACKETS_DISCARD) {
         quint64 gap = now - _lastPacketReceivedTime;

@@ -454,7 +468,7 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
                 / (float)AudioConstants::NETWORK_FRAME_USECS);
             if (calculatedJitterBufferFrames < _desiredJitterBufferFrames) {
                 _desiredJitterBufferFrames = calculatedJitterBufferFrames;
-                qCDebug(audiostream, "Set desired jitter frames to %d", _desiredJitterBufferFrames);
+                qCInfo(audiostream, "Set desired jitter frames to %d (reduced)", _desiredJitterBufferFrames);
             }
             _timeGapStatsForDesiredReduction.clearNewStatsAvailableFlag();
         }

@@ -502,6 +516,7 @@ AudioStreamStats InboundAudioStream::getAudioStreamStats() const {
 
     streamStats._framesAvailable = _ringBuffer.framesAvailable();
     streamStats._framesAvailableAverage = _framesAvailableStat.getAverage();
+    streamStats._unplayedMs = (quint16)_unplayedMs.getWindowMax();
     streamStats._desiredJitterBufferFrames = _desiredJitterBufferFrames;
     streamStats._starveCount = _starveCount;
     streamStats._consecutiveNotMixedCount = _consecutiveNotMixedCount;
|
@ -161,6 +161,7 @@ public:
|
||||||
int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
|
int getFrameCapacity() const { return _ringBuffer.getFrameCapacity(); }
|
||||||
int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
|
int getFramesAvailable() const { return _ringBuffer.framesAvailable(); }
|
||||||
double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); }
|
double getFramesAvailableAverage() const { return _framesAvailableStat.getAverage(); }
|
||||||
|
int getSamplesAvailable() const { return _ringBuffer.samplesAvailable(); }
|
||||||
|
|
||||||
bool isStarved() const { return _isStarved; }
|
bool isStarved() const { return _isStarved; }
|
||||||
bool hasStarted() const { return _hasStarted; }
|
bool hasStarted() const { return _hasStarted; }
|
||||||
|
@ -264,6 +265,7 @@ protected:
|
||||||
int _starveThreshold;
|
int _starveThreshold;
|
||||||
|
|
||||||
TimeWeightedAvg<int> _framesAvailableStat;
|
TimeWeightedAvg<int> _framesAvailableStat;
|
||||||
|
MovingMinMaxAvg<float> _unplayedMs;
|
||||||
|
|
||||||
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
|
// this value is periodically updated with the time-weighted avg from _framesAvailableStat. it is only used for
|
||||||
// dropping silent frames right now.
|
// dropping silent frames right now.
|
||||||
|
|
|
@@ -10,6 +10,7 @@
 //
 
 #include "MixedProcessedAudioStream.h"
+#include "AudioLogging.h"
 
 static const int STEREO_FACTOR = 2;

@@ -56,6 +57,7 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
     emit processSamples(decodedBuffer, outputBuffer);
 
     _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
+    qCDebug(audiostream, "Wrote %d samples to buffer (%d available)", outputBuffer.size() / (int)sizeof(int16_t), getSamplesAvailable());
 
     return packetAfterStreamProperties.size();
 }