Mirror of https://github.com/JulianGro/overte.git (synced 2025-04-13 18:42:33 +02:00)
added PacketTypeAudioStreamStats for jitter-buffer stats
...instead of packing that info into audio packets.
parent 1257e0595a
commit 591bfa14b9
11 changed files with 93 additions and 46 deletions
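In short, the mixer now sends jitter-buffer stats to each listener in a dedicated PacketTypeAudioStreamStats packet, at most once per second, instead of prepending an AudioMixerJitterBuffersStats blob to every PacketTypeMixedAudio packet. Below is a minimal, standalone C++ sketch of that flow; it borrows names from the diff that follows, but the packet "header" is reduced to a single type byte and the hifi plumbing (populatePacketHeader, writeDatagram, usecTimestampNow) is replaced with standard-library stand-ins.

#include <chrono>
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-ins for the real packet types (see the PacketHeaders.h hunk at the end of the diff).
enum PacketType : uint8_t { MixedAudio = 1, AudioStreamStats = 2 };

// Mirrors AudioMixerJitterBuffersStats from the new AudioStreamStats.h.
struct JitterBufferStats {
    uint16_t avatarFrames = 0;
    uint16_t maxFrames = 0;
    float avgFrames = 0.0f;
};

// Build a stats-only packet: one type byte followed by the raw struct.
// Analogous to AudioMixerClientData::encodeAudioStreamStatsPacket().
std::vector<uint8_t> encodeStatsPacket(const JitterBufferStats& stats) {
    std::vector<uint8_t> packet(1 + sizeof(stats));
    packet[0] = AudioStreamStats;
    std::memcpy(packet.data() + 1, &stats, sizeof(stats));
    return packet;
}

// Decode on the receiving side. Analogous to Audio::parseAudioStreamStatsPacket().
// For brevity this sketch assumes the caller has already checked the packet size.
JitterBufferStats parseStatsPacket(const std::vector<uint8_t>& packet) {
    JitterBufferStats stats;
    std::memcpy(&stats, packet.data() + 1, sizeof(stats));
    return stats;
}

// Rate limiter playing the role of TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS:
// a stats packet is only emitted when at least one second has passed since the last send.
bool shouldSendStats(std::chrono::steady_clock::time_point& lastSend) {
    auto now = std::chrono::steady_clock::now();
    if (now - lastSend > std::chrono::seconds(1)) {
        lastSend = now;
        return true;
    }
    return false;
}

int main() {
    JitterBufferStats stats;
    stats.avatarFrames = 2;
    stats.maxFrames = 10;
    stats.avgFrames = 3.5f;

    std::vector<uint8_t> packet = encodeStatsPacket(stats);
    JitterBufferStats received = parseStatsPacket(packet);
    return received.maxFrames == 10 ? 0 : 1;
}

The payoff, visible in the AudioMixer::run() and AudioRingBuffer::parseData() hunks, is that mixed-audio packets no longer carry the stats struct, so the receiver does not have to skip past it before writing samples into its ring buffer.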
@@ -75,7 +75,8 @@ AudioMixer::AudioMixer(const QByteArray& packet) :
     _sumListeners(0),
     _sumMixes(0),
     _sourceUnattenuatedZone(NULL),
-    _listenerUnattenuatedZone(NULL)
+    _listenerUnattenuatedZone(NULL),
+    _lastSendAudioStreamStatsTime(usecTimestampNow())
 {
 
 }
@@ -448,7 +449,7 @@ void AudioMixer::sendStatsPacket() {
         AudioMixerClientData* clientData = static_cast<AudioMixerClientData*>(node->getLinkedData());
         if (clientData) {
             QString property = "jitterStats." + node->getUUID().toString();
-            QString value = clientData->getJitterBufferStats();
+            QString value = clientData->getJitterBufferStatsString();
             statsObject2[qPrintable(property)] = value;
             somethingToSend = true;
             sizeOfStats += property.size() + value.size();
@@ -587,6 +588,17 @@ void AudioMixer::run() {
             ++framesSinceCutoffEvent;
         }
 
+
+        const quint64 TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS = 1 * USECS_PER_SECOND;
+
+        char audioStreamStatsPacket[MAX_PACKET_SIZE];
+        bool sendAudioStreamStats = false;
+        quint64 now = usecTimestampNow();
+        if (now - _lastSendAudioStreamStatsTime > TOO_LONG_SINCE_LAST_SEND_AUDIO_STREAM_STATS) {
+            _lastSendAudioStreamStatsTime = now;
+            sendAudioStreamStats = true;
+        }
+
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
             if (node->getType() == NodeType::Agent && node->getActiveSocket() && node->getLinkedData()
                 && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
@@ -596,20 +608,22 @@ void AudioMixer::run() {
                 int numBytesPacketHeader = populatePacketHeader(clientMixBuffer, PacketTypeMixedAudio);
-                char* dataAt = clientMixBuffer + numBytesPacketHeader;
 
-                // calculate and pack the jitter buffer size stats for this node
-                AudioMixerJitterBuffersStats jitterBuffersStats;
-                ((AudioMixerClientData*)node->getLinkedData())->calculateJitterBuffersStats(jitterBuffersStats);
-                memcpy(dataAt, &jitterBuffersStats, sizeof(AudioMixerJitterBuffersStats));
-                dataAt += sizeof(AudioMixerJitterBuffersStats);
-
-                // pack mixed audio
-                memcpy(dataAt, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
-                nodeList->writeDatagram(clientMixBuffer, dataAt - clientMixBuffer, node);
+                // pack mixed audio, send mixed audio packet
+                memcpy(clientMixBuffer + numBytesPacketHeader, _clientSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
+                nodeList->writeDatagram(clientMixBuffer, NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader, node);
+
+
+                // send an audio stream stats packet if it's time
+                if (sendAudioStreamStats) {
+                    int numBytesWritten = ((AudioMixerClientData*)node->getLinkedData())
+                        ->encodeAudioStreamStatsPacket(audioStreamStatsPacket);
+                    nodeList->writeDatagram(audioStreamStatsPacket, numBytesWritten, node);
+                }
+
                 ++_sumListeners;
             }
         }
 
 
         // push forward the next output pointers for any audio buffers we used
         foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
             if (node->getLinkedData()) {
@@ -58,6 +58,8 @@ private:
     AABox* _sourceUnattenuatedZone;
     AABox* _listenerUnattenuatedZone;
     static bool _useDynamicJitterBuffers;
+
+    quint64 _lastSendAudioStreamStatsTime;
 };
 
 #endif // hifi_AudioMixer_h
@@ -142,7 +142,7 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
     }
 }
 
-void AudioMixerClientData::calculateJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const {
+void AudioMixerClientData::getJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const {
     int avatarJitterBufferFrames = 0;
     int maxJitterBufferFrames = 0;
     int sumJitterBufferFrames = 0;
@@ -166,7 +166,20 @@ void AudioMixerClientData::calculateJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const {
     stats._avgJitterBufferFrames = (float)sumJitterBufferFrames / (float)_ringBuffers.size();
 }
 
-QString AudioMixerClientData::getJitterBufferStats() const {
+int AudioMixerClientData::encodeAudioStreamStatsPacket(char* packet) const {
+    int numBytesPacketHeader = populatePacketHeader(packet, PacketTypeAudioStreamStats);
+    char* dataAt = packet + numBytesPacketHeader;
+
+    // pack jitter buffer stats
+    AudioMixerJitterBuffersStats jitterBufferStats;
+    getJitterBuffersStats(jitterBufferStats);
+    memcpy(dataAt, &jitterBufferStats, sizeof(AudioMixerJitterBuffersStats));
+    dataAt += sizeof(AudioMixerJitterBuffersStats);
+
+    return dataAt - packet;
+}
+
+QString AudioMixerClientData::getJitterBufferStatsString() const {
     QString result;
     AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
     if (avatarRingBuffer) {
@@ -17,6 +17,7 @@
 #include <PositionalAudioRingBuffer.h>
 
 #include "AvatarAudioRingBuffer.h"
+#include "AudioStreamStats.h"
 
 class AudioMixerClientData : public NodeData {
 public:
@@ -30,8 +31,11 @@ public:
     void checkBuffersBeforeFrameSend(AABox* checkSourceZone = NULL, AABox* listenerZone = NULL);
     void pushBuffersAfterFrameSend();
 
-    void calculateJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const;
-    QString getJitterBufferStats() const;
+    void getJitterBuffersStats(AudioMixerJitterBuffersStats& stats) const;
+
+    int encodeAudioStreamStatsPacket(char* packet) const;
+
+    QString getJitterBufferStatsString() const;
 
 private:
     QList<PositionalAudioRingBuffer*> _ringBuffers;
@@ -708,6 +708,16 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
 }
 
+void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
+
+    int numBytesPacketHeader = numBytesForPacketHeader(packet);
+    const char* dataAt = packet.constData() + numBytesPacketHeader;
+
+    // parse audio mixer jitter buffer stats
+    memcpy(&_audioMixerJitterBufferStats, dataAt, sizeof(AudioMixerJitterBuffersStats));
+    dataAt += sizeof(AudioMixerJitterBuffersStats);
+}
+
 // NOTE: numSamples is the total number of single channel samples, since callers will always call this with stereo
 // data we know that we will have 2x samples for each stereo time sample at the format's sample rate
 void Audio::addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples) {
@@ -806,16 +816,7 @@ void Audio::toggleStereoInput() {
     }
 }
 
-void Audio::parseAudioMixerJitterBuffersStats(const QByteArray& audioByteArray) {
-
-    int numBytesPacketHeader = numBytesForPacketHeader(audioByteArray);
-    const char* dataAt = reinterpret_cast<const char*>(audioByteArray.data() + numBytesPacketHeader);
-
-    memcpy(&_audioMixerJitterBufferStats, dataAt, sizeof(AudioMixerJitterBuffersStats));
-}
-
 void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
-    parseAudioMixerJitterBuffersStats(audioByteArray);
     _ringBuffer.parseData(audioByteArray);
 
     float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
@@ -16,7 +16,7 @@
 #include <vector>
 
 #include "InterfaceConfig.h"
-#include "PositionalAudioRingBuffer.h"
+#include "AudioStreamStats.h"
 
 #include <QAudio>
 #include <QAudioInput>
@@ -77,6 +77,7 @@ public slots:
     void start();
     void stop();
     void addReceivedAudioToBuffer(const QByteArray& audioByteArray);
+    void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
     void handleAudioInput();
     void reset();
@@ -219,8 +220,6 @@ private:
     void renderGrid(const float* color, int x, int y, int width, int height, int rows, int cols);
     void renderLineStrip(const float* color, int x, int y, int n, int offset, const QByteArray* byteArray);
 
-    void parseAudioMixerJitterBuffersStats(const QByteArray& audioByteArray);
-
     // Audio scope data
     static const unsigned int NETWORK_SAMPLES_PER_FRAME = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
     static const unsigned int DEFAULT_FRAMES_PER_SCOPE = 5;
@@ -51,7 +51,10 @@ void DatagramProcessor::processDatagrams() {
                     QMetaObject::invokeMethod(&application->_audio, "addReceivedAudioToBuffer", Qt::QueuedConnection,
                                               Q_ARG(QByteArray, incomingPacket));
                     break;
-
+                case PacketTypeAudioStreamStats:
+                    QMetaObject::invokeMethod(&application->_audio, "parseAudioStreamStatsPacket", Qt::QueuedConnection,
+                                              Q_ARG(QByteArray, incomingPacket));
+                    break;
                 case PacketTypeParticleAddResponse:
                     // this will keep creatorTokenIDs to IDs mapped correctly
                     Particle::handleAddParticleResponse(incomingPacket);
@@ -16,9 +16,8 @@
 #include <QtCore/QDebug>
 
 #include "PacketHeaders.h"
-
 #include "AudioRingBuffer.h"
-#include "PositionalAudioRingBuffer.h"
+
 
 AudioRingBuffer::AudioRingBuffer(int numFrameSamples, bool randomAccessMode) :
     NodeData(),
@@ -69,9 +68,6 @@ void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) {
 
 int AudioRingBuffer::parseData(const QByteArray& packet) {
     int numBytesBeforeAudioData = numBytesForPacketHeader(packet);
-    if (packetTypeForPacket(packet) == PacketTypeMixedAudio) {
-        numBytesBeforeAudioData += sizeof(AudioMixerJitterBuffersStats);
-    }
     return writeData(packet.data() + numBytesBeforeAudioData, packet.size() - numBytesBeforeAudioData);
 }
 
libraries/audio/src/AudioStreamStats.h (new file, 26 lines)
@@ -0,0 +1,26 @@
+//
+//  AudioStreamStats.h
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 6/25/2014
+//  Copyright 2014 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_AudioStreamStats_h
+#define hifi_AudioStreamStats_h
+
+class AudioMixerJitterBuffersStats {
+public:
+    AudioMixerJitterBuffersStats()
+        : _avatarJitterBufferFrames(0), _maxJitterBufferFrames(0), _avgJitterBufferFrames(0)
+    {}
+
+    quint16 _avatarJitterBufferFrames;
+    quint16 _maxJitterBufferFrames;
+    float _avgJitterBufferFrames;
+};
+
+#endif // hifi_AudioStreamStats_h
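Both ends copy this struct with a raw memcpy (encodeAudioStreamStatsPacket on the mixer, parseAudioStreamStatsPacket in the interface), so its in-memory layout is effectively the wire format; note the counters also shrink from int to quint16 compared with the old definition removed from PositionalAudioRingBuffer.h below. A small standalone check of that layout, using the <cstdint> equivalent of Qt's quint16 (this assumes typical x86-style alignment, and raw memcpy serialization additionally assumes both ends share endianness):

#include <cstddef>
#include <cstdint>

// Same member layout as AudioMixerJitterBuffersStats (quint16 is Qt's
// 16-bit unsigned integer, so uint16_t stands in for it here).
struct JitterStatsLayout {
    uint16_t avatarJitterBufferFrames;
    uint16_t maxJitterBufferFrames;
    float avgJitterBufferFrames;
};

// The two 2-byte counters exactly fill the 4-byte alignment slot ahead of the
// float, so common ABIs insert no padding and the struct is 8 bytes end to end.
static_assert(sizeof(JitterStatsLayout) == 8, "unexpected padding in stats struct");
static_assert(offsetof(JitterStatsLayout, avgJitterBufferFrames) == 4,
              "float member expected at offset 4");

int main() { return 0; }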
@@ -43,17 +43,6 @@ private:
     bool _newWindowMaxGapAvailable;
 };
 
-class AudioMixerJitterBuffersStats {
-public:
-    AudioMixerJitterBuffersStats()
-        : _avatarJitterBufferFrames(0), _maxJitterBufferFrames(0), _avgJitterBufferFrames(0)
-    {}
-
-    int _avatarJitterBufferFrames;
-    int _maxJitterBufferFrames;
-    float _avgJitterBufferFrames;
-};
-
 class PositionalAudioRingBuffer : public AudioRingBuffer {
 public:
     enum Type {
@@ -40,7 +40,7 @@ enum PacketType {
     PacketTypeCreateAssignment,
     PacketTypeDomainOAuthRequest,
     PacketTypeMuteEnvironment,
-    PacketTypeDataServerSend, // reusable
+    PacketTypeAudioStreamStats,
     PacketTypeDataServerConfirm,
     PacketTypeVoxelQuery,
     PacketTypeVoxelData,
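One wire-format detail in this last hunk: PacketTypeAudioStreamStats takes over the slot of PacketTypeDataServerSend, which was already marked // reusable, presumably so that the numeric values of the packet types after it do not shift. A tiny illustrative sketch of that property, using hypothetical mini-enums rather than the real PacketHeaders list:

#include <cstdint>

// Hypothetical before/after enums; only the reusable slot changes its name.
enum Before : uint8_t { MuteEnvironment0, DataServerSend0 /* reusable */, DataServerConfirm0 };
enum After  : uint8_t { MuteEnvironment1, AudioStreamStats1, DataServerConfirm1 };

// Renaming the placeholder in place leaves every later value unchanged, so
// old and new builds still agree on what, for example, the confirm type is numerically.
static_assert(static_cast<int>(DataServerConfirm0) == static_cast<int>(DataServerConfirm1),
              "later packet type values must not shift");

int main() { return 0; }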