Mirror of https://github.com/overte-org/overte.git (synced 2025-08-09 03:37:49 +02:00)
store channels on audio stream

Commit: 5af95c6062
Parent: 05aded5c9d
9 changed files with 30 additions and 20 deletions
Agent.cpp
@@ -48,8 +48,7 @@ static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 10;
 Agent::Agent(ReceivedMessage& message) :
     ThreadedAssignment(message),
     _entityEditSender(),
-    _receivedAudioStream(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO,
-        RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES) {
+    _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES) {
     DependencyManager::get<EntityScriptingInterface>()->setPacketSender(&_entityEditSender);

     ResourceManager::init();
AudioClient.cpp
@@ -115,7 +115,7 @@ AudioClient::AudioClient() :
     _loopbackAudioOutput(NULL),
     _loopbackOutputDevice(NULL),
     _inputRingBuffer(0),
-    _receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
+    _receivedAudioStream(RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES),
     _isStereoInput(false),
     _outputStarveDetectionStartTimeMsec(0),
     _outputStarveDetectionCount(0),
InboundAudioStream.cpp
@@ -46,10 +46,11 @@ static const int STATS_FOR_STATS_PACKET_WINDOW_SECONDS = 30;
 // _currentJitterBufferFrames is updated with the time-weighted avg and the running time-weighted avg is reset.
 static const quint64 FRAMES_AVAILABLE_STAT_WINDOW_USECS = 10 * USECS_PER_SECOND;

-InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
-    _ringBuffer(numFrameSamples, numFramesCapacity),
-    _dynamicJitterBufferEnabled(numStaticJitterFrames == -1),
-    _staticJitterBufferFrames(std::max(numStaticJitterFrames, DEFAULT_STATIC_JITTER_FRAMES)),
+InboundAudioStream::InboundAudioStream(int numChannels, int numFrames, int numBlocks, int numStaticJitterBlocks) :
+    _ringBuffer(numChannels * numFrames, numBlocks),
+    _numChannels(numChannels),
+    _dynamicJitterBufferEnabled(numStaticJitterBlocks == -1),
+    _staticJitterBufferFrames(std::max(numStaticJitterBlocks, DEFAULT_STATIC_JITTER_FRAMES)),
     _desiredJitterBufferFrames(_dynamicJitterBufferEnabled ? 1 : _staticJitterBufferFrames),
     _incomingSequenceNumberStats(STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
     _starveHistory(STARVE_HISTORY_CAPACITY),
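Note (not part of the commit): a minimal standalone C++ sketch of how the new constructor arguments size the ring buffer; the buffer now holds numChannels * numFrames samples per block, with room for numBlocks blocks. The numeric values below are illustrative assumptions, not values taken from the repository.

#include <cstddef>
#include <iostream>

int main() {
    // Hypothetical values for illustration only.
    const int numChannels = 2;    // stereo stream
    const int numFrames = 240;    // samples per channel in one network block (assumed)
    const int numBlocks = 100;    // ring-buffer capacity, in blocks (assumed)

    // Mirrors the new initializer _ringBuffer(numChannels * numFrames, numBlocks):
    // one block is numChannels * numFrames samples, and the buffer holds numBlocks of them.
    const int samplesPerBlock = numChannels * numFrames;
    const std::size_t capacitySamples =
        static_cast<std::size_t>(samplesPerBlock) * numBlocks;

    std::cout << "samples per block: " << samplesPerBlock << '\n'
              << "total capacity (samples): " << capacitySamples << '\n';
    return 0;
}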
@@ -224,7 +225,7 @@ int InboundAudioStream::writeDroppableSilentFrames(int silentFrames) {
     }

     // calculate how many silent frames we should drop.
-    int silentSamples = silentFrames * 2;
+    int silentSamples = silentFrames * _numChannels;
     int samplesPerFrame = _ringBuffer.getNumFrameSamples();
     int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
     int numSilentFramesToDrop = 0;
@@ -422,7 +423,7 @@ int InboundAudioStream::writeFramesForDroppedPackets(int networkFrames) {
 int InboundAudioStream::writeLastFrameRepeatedWithFade(int frames) {
     AudioRingBuffer::ConstIterator frameToRepeat = _ringBuffer.lastFrameWritten();
     int frameSize = _ringBuffer.getNumFrameSamples();
-    int samplesToWrite = frames * 2;
+    int samplesToWrite = frames * _numChannels;
     int indexOfRepeat = 0;
     do {
         int samplesToWriteThisIteration = std::min(samplesToWrite, frameSize);
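Note (not part of the commit): both writeDroppableSilentFrames() and writeLastFrameRepeatedWithFade() previously hard-coded a stereo factor of 2 when converting frames to samples; they now scale by the stream's stored channel count. A hypothetical standalone helper showing the same conversion:

#include <iostream>

// Not the Overte implementation; illustrates the frame-to-sample conversion only.
int framesToSamples(int frames, int numChannels) {
    return frames * numChannels;   // was: frames * 2
}

int main() {
    std::cout << framesToSamples(3, 1) << '\n';   // mono:   3 samples
    std::cout << framesToSamples(3, 2) << '\n';   // stereo: 6 samples
    return 0;
}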
InboundAudioStream.h
@@ -47,7 +47,7 @@ public:
     static const bool REPETITION_WITH_FADE;

     InboundAudioStream() = delete;
-    InboundAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
+    InboundAudioStream(int numChannels, int numFrames, int numBlocks, int numStaticJitterBlocks);
     ~InboundAudioStream();

     void reset();
@@ -144,6 +144,7 @@ protected:
 protected:

     AudioRingBuffer _ringBuffer;
+    int _numChannels;

     bool _lastPopSucceeded { false };
     AudioRingBuffer::ConstIterator _lastPopOutput;
MixedAudioStream.cpp
@@ -11,5 +11,10 @@

 #include "MixedAudioStream.h"

-MixedAudioStream::MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames) :
-    InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
+#include "AudioConstants.h"
+
+static const int STEREO_FACTOR = 2;
+
+MixedAudioStream::MixedAudioStream(int numFramesCapacity, int numStaticJitterFrames) :
+    InboundAudioStream(STEREO_FACTOR, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
+                       numFramesCapacity, numStaticJitterFrames) {}
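Note (not part of the commit): with the channel factor and per-channel frame size fixed inside the stereo subclass, callers such as Agent only pass capacity and jitter settings (see the Agent.cpp hunk above). A toy sketch of that narrowed constructor surface, using stand-in classes and an assumed per-channel frame size rather than the real Overte types:

#include <iostream>

// Toy stand-ins (not the Overte classes): the base takes channels explicitly,
// the stereo wrapper bakes in the channel factor and per-channel frame size.
static const int STEREO_FACTOR = 2;
static const int FRAME_SAMPLES_PER_CHANNEL = 240;   // assumed value

struct ToyInboundStream {
    int samplesPerBlock;
    int blocks;
    ToyInboundStream(int numChannels, int numFrames, int numBlocks, int /*numStaticJitterBlocks*/)
        : samplesPerBlock(numChannels * numFrames), blocks(numBlocks) {}
};

struct ToyMixedStream : ToyInboundStream {
    // Callers pass only capacity and the static-jitter setting.
    ToyMixedStream(int numFramesCapacity, int numStaticJitterFrames = -1)
        : ToyInboundStream(STEREO_FACTOR, FRAME_SAMPLES_PER_CHANNEL,
                           numFramesCapacity, numStaticJitterFrames) {}
};

int main() {
    ToyMixedStream stream(10);   // 10-block capacity, dynamic jitter
    std::cout << stream.samplesPerBlock << " samples per block, "
              << stream.blocks << " blocks\n";
    return 0;
}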
MixedAudioStream.h
@@ -16,7 +16,7 @@

 class MixedAudioStream : public InboundAudioStream {
 public:
-    MixedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
+    MixedAudioStream(int numFramesCapacity, int numStaticJitterFrames = -1);

     float getNextOutputFrameLoudness() const { return _ringBuffer.getNextOutputFrameLoudness(); }
 };
MixedProcessedAudioStream.cpp
@@ -14,8 +14,9 @@

 static const int STEREO_FACTOR = 2;

-MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames)
-    : InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
+MixedProcessedAudioStream::MixedProcessedAudioStream(int numFramesCapacity, int numStaticJitterFrames)
+    : InboundAudioStream(STEREO_FACTOR, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
+                         numFramesCapacity, numStaticJitterFrames) {}

 void MixedProcessedAudioStream::outputFormatChanged(int sampleRate, int channelCount) {
     _outputSampleRate = sampleRate;
MixedProcessedAudioStream.h
@@ -19,7 +19,7 @@ class AudioClient;
 class MixedProcessedAudioStream : public InboundAudioStream {
     Q_OBJECT
 public:
-    MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames = -1);
+    MixedProcessedAudioStream(int numFramesCapacity, int numStaticJitterFrames = -1);

 signals:

PositionalAudioStream.cpp
@@ -21,11 +21,14 @@
 #include <udt/PacketHeaders.h>
 #include <UUID.h>

+static const int MONO_FACTOR = 1;
+static const int STEREO_FACTOR = 2;
+
 PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames) :
-    InboundAudioStream(isStereo
-                       ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
-                       : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
-                       AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY, numStaticJitterFrames),
+    InboundAudioStream(isStereo ? STEREO_FACTOR : MONO_FACTOR,
+                       AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL,
+                       AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY,
+                       numStaticJitterFrames),
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
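Note (not part of the commit): PositionalAudioStream no longer chooses between two precomputed frame-sample constants; it picks a channel factor and lets the base class multiply by the per-channel frame size. A minimal sketch of that selection, with an assumed per-channel block size:

#include <initializer_list>
#include <iostream>

static const int MONO_FACTOR = 1;
static const int STEREO_FACTOR = 2;

int main() {
    const int samplesPerChannel = 240;   // assumed per-channel block size
    for (bool isStereo : {false, true}) {
        // Same selection the constructor now performs before passing the
        // channel count and per-channel frame size to the base class.
        const int channels = isStereo ? STEREO_FACTOR : MONO_FACTOR;
        std::cout << (isStereo ? "stereo" : "mono") << ": "
                  << channels * samplesPerChannel << " samples per block\n";
    }
    return 0;
}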