Merge pull request #3254 from wangyix/pull_mode_audio_output
Audio output switched to pull mode

Commit: b940d082f3
13 changed files with 312 additions and 196 deletions
assignment-client/src/audio/AudioMixerClientData.cpp

@@ -104,7 +104,7 @@ void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, A
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
         PositionalAudioStream* stream = i.value();
-        if (stream->popFrames(1)) {
+        if (stream->popFrames(1, true) > 0) {
             // this is a ring buffer that is ready to go
 
             // calculate the trailing avg loudness for the next frame
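Note: popFrames() changes from returning a bool to returning the number of frames
actually popped, with an explicit all-or-nothing flag (see the InboundAudioStream
changes below). The mixer's call site keeps the old semantics:

    // before: bool popFrames(int numFrames, bool starveOnFail = true);
    // after:  int popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped = true);
    if (stream->popFrames(1, true) > 0) {
        // exactly one whole frame was popped; safe to mix from getLastPopOutput()
    }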
interface/src/Audio.cpp

@@ -54,6 +54,8 @@ static const int APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS = (int)(30.0f * 1000.
 // Mute icon configration
 static const int MUTE_ICON_SIZE = 24;
 
+static const int RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES = 100;
+
 
 Audio::Audio(QObject* parent) :
     AbstractAudioInterface(parent),
@@ -64,19 +66,14 @@ Audio::Audio(QObject* parent) :
     _audioOutput(NULL),
     _desiredOutputFormat(),
     _outputFormat(),
-    _outputDevice(NULL),
+    _outputFrameSize(0),
     _numOutputCallbackBytes(0),
     _loopbackAudioOutput(NULL),
     _loopbackOutputDevice(NULL),
     _proceduralAudioOutput(NULL),
     _proceduralOutputDevice(NULL),
-
-    // NOTE: Be very careful making changes to the initializers of these ring buffers. There is a known problem with some
-    // Mac audio devices that slowly introduce additional delay in the audio device because they play out audio slightly
-    // slower than real time (or at least the desired sample rate). If you increase the size of the ring buffer, then it
-    // this delay will slowly add up and the longer someone runs, they more delayed their audio will be.
     _inputRingBuffer(0),
-    _receivedAudioStream(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO, 100, true, 0, 0, true),
+    _receivedAudioStream(0, RECEIVED_AUDIO_STREAM_CAPACITY_FRAMES, true, 0, 0, true),
     _isStereoInput(false),
     _averagedLatency(0.0),
     _lastInputLoudness(0),
@@ -115,13 +112,15 @@ Audio::Audio(QObject* parent) :
     _inputRingBufferMsecsAvailableStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _audioOutputMsecsUnplayedStats(1, FRAMES_AVAILABLE_STATS_WINDOW_SECONDS),
     _lastSentAudioPacket(0),
-    _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS)
+    _packetSentTimeGaps(1, APPROXIMATELY_30_SECONDS_OF_AUDIO_PACKETS),
+    _audioOutputIODevice(*this)
 {
     // clear the array of locally injected samples
     memset(_localProceduralSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
     // Create the noise sample array
     _noiseSampleFrames = new float[NUMBER_OF_NOISE_SAMPLE_FRAMES];
 
+    connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &Audio::processReceivedAudioStreamSamples, Qt::DirectConnection);
 }
 
 void Audio::init(QGLWidget *parent) {
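Note: the Qt::DirectConnection matters. A sketch of the resulting control flow,
using only names from this diff (Qt fact: a direct connection invokes the slot
immediately on the emitting thread, like a plain function call):

    // network packet arrives:
    //   Audio::addReceivedAudioToStream(packet)
    //     -> _receivedAudioStream.parseData(packet)
    //       -> MixedProcessedAudioStream::parseAudioData(...)
    //         -> emit processSamples(inputBuffer, outputBuffer)   // Qt::DirectConnection
    //           -> Audio::processReceivedAudioStreamSamples(...)  // fills outputBuffer
    //       <- outputBuffer now holds device-format samples; written to _ringBuffer

So by the time parseData() returns, the ring buffer already contains resampled,
device-ready audio.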
@@ -312,7 +311,7 @@ bool adjustedFormatForAudioDevice(const QAudioDeviceInfo& audioDevice,
     }
 }
 
-void linearResampling(int16_t* sourceSamples, int16_t* destinationSamples,
+void linearResampling(const int16_t* sourceSamples, int16_t* destinationSamples,
     unsigned int numSourceSamples, unsigned int numDestinationSamples,
     const QAudioFormat& sourceAudioFormat, const QAudioFormat& destinationAudioFormat) {
     if (sourceAudioFormat == destinationAudioFormat) {
@@ -723,21 +722,92 @@ void Audio::handleAudioInput() {
         }
         delete[] inputAudioSamples;
     }
+}
 
-    if (_receivedAudioStream.getPacketsReceived() > 0) {
-        pushAudioToOutput();
+void Audio::processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
+
+    const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
+    const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
+        / (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
+
+    outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+
+    const int16_t* receivedSamples;
+    if (_processSpatialAudio) {
+        unsigned int sampleTime = _spatialAudioStart;
+        QByteArray buffer = inputBuffer;
+
+        // Accumulate direct transmission of audio from sender to receiver
+        if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+            emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+            addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+        }
+
+        // Send audio off for spatial processing
+        emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+        // copy the samples we'll resample from the spatial audio ring buffer - this also
+        // pushes the read pointer of the spatial audio ring buffer forwards
+        _spatialAudioRingBuffer.readSamples(_outputProcessingBuffer, numNetworkOutputSamples);
+
+        // Advance the start point for the next packet of audio to arrive
+        _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+
+        receivedSamples = _outputProcessingBuffer;
+    } else {
+        // copy the samples we'll resample from the ring buffer - this also
+        // pushes the read pointer of the ring buffer forwards
+        //receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
+
+        receivedSamples = reinterpret_cast<const int16_t*>(inputBuffer.data());
+    }
+
+    // copy the packet from the RB to the output
+    linearResampling(receivedSamples,
+        (int16_t*)outputBuffer.data(),
+        numNetworkOutputSamples,
+        numDeviceOutputSamples,
+        _desiredOutputFormat, _outputFormat);
+
+    if (_scopeEnabled && !_scopeEnabledPause) {
+        unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
+        const int16_t* samples = receivedSamples;
+        for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
+
+            unsigned int audioChannel = 0;
+            addBufferToScope(
+                _scopeOutputLeft,
+                _scopeOutputOffset,
+                samples, audioChannel, numAudioChannels);
+
+            audioChannel = 1;
+            addBufferToScope(
+                _scopeOutputRight,
+                _scopeOutputOffset,
+                samples, audioChannel, numAudioChannels);
+
+            _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
+            _scopeOutputOffset %= _samplesPerScope;
+            samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
+        }
     }
 }
 
 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
 
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
-        processReceivedAudio(audioByteArray);
+        _receivedAudioStream.parseData(audioByteArray);
     }
 
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
 }
 
 void Audio::parseAudioStreamStatsPacket(const QByteArray& packet) {
 
     int numBytesPacketHeader = numBytesForPacketHeader(packet);
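Note: a worked instance of the sample-count conversion at the top of
processReceivedAudioStreamSamples(), with illustrative numbers only (the real
rates come from _desiredOutputFormat and the negotiated device format):

    // assume a 24 kHz stereo network format and a 48 kHz stereo output device
    const int numNetworkOutputSamples = 512;   // hypothetical inbound packet
    const int numDeviceOutputSamples = numNetworkOutputSamples
        * (48000 * 2)    // _outputFormat.sampleRate() * channelCount()
        / (24000 * 2);   // _desiredOutputFormat.sampleRate() * channelCount()
    // = 1024 samples: the device consumes twice as many samples per unit time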
@@ -901,119 +971,6 @@ void Audio::toggleStereoInput() {
     }
 }
 
-void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
-
-    // parse audio data
-    _receivedAudioStream.parseData(audioByteArray);
-
-    // This call has been moved to handleAudioInput. handleAudioInput is called at a much more regular interval
-    // than processReceivedAudio since handleAudioInput does not experience network-related jitter.
-    // This way, we reduce the jitter of the frames being pushed to the audio output, allowing us to use a reduced
-    // buffer size for it, which reduces latency.
-
-    //pushAudioToOutput();
-}
-
-void Audio::pushAudioToOutput() {
-
-    if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
-        // the audio output has no samples to play. set the downstream audio to starved so that it
-        // refills to its desired size before pushing frames
-        _receivedAudioStream.setToStarved();
-    }
-
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
-
-    int numFramesToPush;
-    if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
-        numFramesToPush = _receivedAudioStream.getFramesAvailable();
-    } else {
-        // make sure to push a whole number of frames to the audio output
-        int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t) * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
-        numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);
-    }
-
-    // if there is data in the received stream and room in the audio output, decide what to do
-
-    if (numFramesToPush > 0 && _receivedAudioStream.popFrames(numFramesToPush, false)) {
-
-        int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
-        int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
-
-        QByteArray outputBuffer;
-        outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
-
-        AudioRingBuffer::ConstIterator receivedAudioStreamPopOutput = _receivedAudioStream.getLastPopOutput();
-
-        int16_t* receivedSamples = new int16_t[numNetworkOutputSamples];
-        if (_processSpatialAudio) {
-            unsigned int sampleTime = _spatialAudioStart;
-            QByteArray buffer;
-            buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
-
-            receivedAudioStreamPopOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
-
-            // Accumulate direct transmission of audio from sender to receiver
-            if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
-                emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-                addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
-            }
-
-            // Send audio off for spatial processing
-            emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-
-            // copy the samples we'll resample from the spatial audio ring buffer - this also
-            // pushes the read pointer of the spatial audio ring buffer forwards
-            _spatialAudioRingBuffer.readSamples(receivedSamples, numNetworkOutputSamples);
-
-            // Advance the start point for the next packet of audio to arrive
-            _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
-        } else {
-            // copy the samples we'll resample from the ring buffer - this also
-            // pushes the read pointer of the ring buffer forwards
-            receivedAudioStreamPopOutput.readSamples(receivedSamples, numNetworkOutputSamples);
-        }
-
-        // copy the packet from the RB to the output
-        linearResampling(receivedSamples,
-            (int16_t*)outputBuffer.data(),
-            numNetworkOutputSamples,
-            numDeviceOutputSamples,
-            _desiredOutputFormat, _outputFormat);
-
-        if (_outputDevice) {
-            _outputDevice->write(outputBuffer);
-        }
-
-        if (_scopeEnabled && !_scopeEnabledPause) {
-            unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-            int16_t* samples = receivedSamples;
-            for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
-
-                unsigned int audioChannel = 0;
-                addBufferToScope(
-                    _scopeOutputLeft,
-                    _scopeOutputOffset,
-                    samples, audioChannel, numAudioChannels);
-
-                audioChannel = 1;
-                addBufferToScope(
-                    _scopeOutputRight,
-                    _scopeOutputOffset,
-                    samples, audioChannel, numAudioChannels);
-
-                _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
-                _scopeOutputOffset %= _samplesPerScope;
-                samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
-            }
-        }
-
-        delete[] receivedSamples;
-    }
-}
-
 void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {
 
     // zero out the locally injected audio in preparation for audio procedural sounds
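Note: the heart of what was just deleted is the push-side bookkeeping that clamped
each push to the space left in the QAudioOutput buffer:

    int numFramesAudioOutputRoomFor = (int)(_audioOutput->bytesFree() / sizeof(int16_t)
        * networkOutputToOutputRatio) / _receivedAudioStream.getNumFrameSamples();
    numFramesToPush = std::min(_receivedAudioStream.getFramesAvailable(), numFramesAudioOutputRoomFor);

In pull mode none of this is needed: readData(data, maxSize) is told exactly how
much room the device has, so the overflow check (and its debug menu toggle,
removed from Menu.cpp and Menu.h below) becomes dead code.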
@@ -1514,11 +1471,11 @@ void Audio::renderScope(int width, int height) {
     if (!_scopeEnabled)
         return;
 
-    static const float backgroundColor[4] = { 0.2f, 0.2f, 0.2f, 0.6f };
+    static const float backgroundColor[4] = { 0.4f, 0.4f, 0.4f, 0.6f };
     static const float gridColor[4] = { 0.3f, 0.3f, 0.3f, 0.6f };
-    static const float inputColor[4] = { 0.3f, .7f, 0.3f, 0.6f };
-    static const float outputLeftColor[4] = { 0.7f, .3f, 0.3f, 0.6f };
-    static const float outputRightColor[4] = { 0.3f, .3f, 0.7f, 0.6f };
+    static const float inputColor[4] = { 0.3f, 1.0f, 0.3f, 1.0f };
+    static const float outputLeftColor[4] = { 1.0f, 0.3f, 0.3f, 1.0f };
+    static const float outputRightColor[4] = { 0.3f, 0.3f, 1.0f, 1.0f };
     static const int gridRows = 2;
     int gridCols = _framesPerScope;
 
@@ -1631,6 +1588,12 @@ void Audio::renderLineStrip(const float* color, int x, int y, int n, int offset,
 }
 
+void Audio::outputFormatChanged() {
+    int outputFormatChannelCountTimesSampleRate = _outputFormat.channelCount() * _outputFormat.sampleRate();
+    _outputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * outputFormatChannelCountTimesSampleRate / _desiredOutputFormat.sampleRate();
+    _receivedAudioStream.outputFormatChanged(outputFormatChannelCountTimesSampleRate);
+}
+
 bool Audio::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {
     bool supportedFormat = false;
 
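Note: a worked instance of the _outputFrameSize computation, with assumed numbers
(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL's real value lives in the audio
library's constants; 512 is used here purely for illustration):

    // one network frame: 512 samples per channel at an assumed 24 kHz network rate
    // device format: stereo at 48 kHz
    int outputFormatChannelCountTimesSampleRate = 2 * 48000;   // 96000
    int outputFrameSize = 512 * 96000 / 24000;                 // 2048
    // each popped network frame expands to 2048 interleaved int16 device samples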
@@ -1681,7 +1644,6 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
     // cleanup any previously initialized device
     if (_audioOutput) {
         _audioOutput->stop();
-        _outputDevice = NULL;
 
         delete _audioOutput;
         _audioOutput = NULL;
@@ -1703,13 +1665,17 @@ bool Audio::switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo)
     if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
         qDebug() << "The format to be used for audio output is" << _outputFormat;
 
-        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 10;
+        outputFormatChanged();
+
+        const int AUDIO_OUTPUT_BUFFER_SIZE_FRAMES = 3;
 
         // setup our general output device for audio-mixer audio
         _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
-        _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFormat.bytesForDuration(BUFFER_SEND_INTERVAL_USECS));
-        qDebug() << "Ring Buffer capacity in frames: " << AUDIO_OUTPUT_BUFFER_SIZE_FRAMES;
-        _outputDevice = _audioOutput->start();
+        _audioOutput->setBufferSize(AUDIO_OUTPUT_BUFFER_SIZE_FRAMES * _outputFrameSize * sizeof(int16_t));
+        qDebug() << "Ring Buffer capacity in frames: " << _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize;
+
+        _audioOutputIODevice.start();
+        _audioOutput->start(&_audioOutputIODevice);
 
         // setup a loopback audio output device
         _loopbackAudioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
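Note: pull mode is what lets the device buffer shrink from 10 frames to 3: the
output no longer has to absorb jittery pushes, it refills itself on demand. Two
Qt details the new code leans on: setBufferSize() only takes effect if called
before start(), and after start() bufferSize() reports what the platform actually
granted, which is why the qDebug line recomputes the capacity instead of printing
the constant:

    // with the illustrative _outputFrameSize of 2048 from above:
    int requestedBytes = 3 * 2048 * sizeof(int16_t);   // 12288 bytes requested
    // achieved capacity in frames, after start():
    //   _audioOutput->bufferSize() / sizeof(int16_t) / (float)_outputFrameSize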
@@ -1779,3 +1745,21 @@ float Audio::getInputRingBufferMsecsAvailable() const {
     float msecsInInputRingBuffer = bytesInInputRingBuffer / (float)(_inputFormat.bytesForDuration(USECS_PER_MSEC));
     return msecsInInputRingBuffer;
 }
+
+qint64 Audio::AudioOutputIODevice::readData(char * data, qint64 maxSize) {
+    MixedProcessedAudioStream& receivedAUdioStream = _parent._receivedAudioStream;
+
+    int samplesRequested = maxSize / sizeof(int16_t);
+    int samplesPopped;
+    int bytesWritten;
+    if ((samplesPopped = receivedAUdioStream.popSamples(samplesRequested, false)) > 0) {
+        AudioRingBuffer::ConstIterator lastPopOutput = receivedAUdioStream.getLastPopOutput();
+        lastPopOutput.readSamples((int16_t*)data, samplesPopped);
+        bytesWritten = samplesPopped * sizeof(int16_t);
+    } else {
+        memset(data, 0, maxSize);
+        bytesWritten = maxSize;
+    }
+
+    return bytesWritten;
+}
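Note: when the stream cannot supply samples (still refilling after a starve),
readData() hands the device zeroed samples rather than returning 0; for a
QIODevice feeding a QAudioOutput, a short read risks an underrun while zeros just
play as silence until frames arrive. The same pattern in a self-contained toy
(assumed Qt 5 API; not part of this PR): a pull source that synthesizes a 440 Hz
tone on demand.

    #include <QAudioOutput>
    #include <QIODevice>
    #include <QtMath>
    #include <cstdint>

    class ToneSource : public QIODevice {
    protected:
        qint64 readData(char* data, qint64 maxSize) override {
            int16_t* samples = reinterpret_cast<int16_t*>(data);
            qint64 numSamples = maxSize / sizeof(int16_t);
            for (qint64 i = 0; i < numSamples; i++) {
                // mono 16-bit at an assumed 48 kHz device rate
                samples[i] = int16_t(8000.0 * qSin(2.0 * M_PI * 440.0 * _t));
                _t += 1.0 / 48000.0;
            }
            return numSamples * sizeof(int16_t);   // always satisfy the device
        }
        qint64 writeData(const char*, qint64) override { return 0; }
    private:
        double _t = 0.0;
    };

    // usage, mirroring _audioOutputIODevice.start() / _audioOutput->start(...):
    //   ToneSource source;
    //   source.open(QIODevice::ReadOnly);
    //   audioOutput->start(&source);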
interface/src/Audio.h

@@ -33,7 +33,7 @@
 #include <AbstractAudioInterface.h>
 #include <StdDev.h>
 
-#include "MixedAudioStream.h"
+#include "MixedProcessedAudioStream.h"
 
 static const int NUM_AUDIO_CHANNELS = 2;
 
@@ -45,6 +45,20 @@ class QIODevice;
 class Audio : public AbstractAudioInterface {
     Q_OBJECT
 public:
+
+    class AudioOutputIODevice : public QIODevice {
+    public:
+        AudioOutputIODevice(Audio& parent) : _parent(parent) {};
+
+        void start() { open(QIODevice::ReadOnly); }
+        void stop() { close(); }
+        qint64 readData(char * data, qint64 maxSize);
+        qint64 writeData(const char * data, qint64 maxSize) { return 0; }
+    private:
+        Audio& _parent;
+    };
+
+
     // setup for audio I/O
     Audio(QObject* parent = 0);
 
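Note: the QIODevice contract as used here: QAudioOutput in pull mode only ever
reads, so the device is opened ReadOnly (start() must run before
QAudioOutput::start() so the device is already open) and writeData() is a stub
returning 0. The _parent back-reference is how readData() reaches the shared
_receivedAudioStream; this class holds no buffering of its own.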
@@ -94,6 +108,7 @@ public slots:
     void addReceivedAudioToStream(const QByteArray& audioByteArray);
     void parseAudioStreamStatsPacket(const QByteArray& packet);
     void addSpatialAudioToBuffer(unsigned int sampleTime, const QByteArray& spatialAudio, unsigned int numSamples);
+    void processReceivedAudioStreamSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
     void handleAudioInput();
     void reset();
     void resetStats();
@@ -133,7 +148,10 @@ signals:
     void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format);
     void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
     void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format);
+
+private:
+    void outputFormatChanged();
 
 private:
 
     QByteArray firstInputFrame;
@@ -146,14 +164,15 @@ private:
     QAudioOutput* _audioOutput;
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
-    QIODevice* _outputDevice;
+    int _outputFrameSize;
+    int16_t _outputProcessingBuffer[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
     int _numOutputCallbackBytes;
     QAudioOutput* _loopbackAudioOutput;
     QIODevice* _loopbackOutputDevice;
     QAudioOutput* _proceduralAudioOutput;
     QIODevice* _proceduralOutputDevice;
     AudioRingBuffer _inputRingBuffer;
-    MixedAudioStream _receivedAudioStream;
+    MixedProcessedAudioStream _receivedAudioStream;
     bool _isStereoInput;
 
     QString _inputAudioDeviceName;
@@ -211,12 +230,6 @@ private:
 
     // Add sounds that we want the user to not hear themselves, by adding on top of mic input signal
     void addProceduralSounds(int16_t* monoInput, int numSamples);
 
-    // Process received audio
-    void processReceivedAudio(const QByteArray& audioByteArray);
-
-    // Pushes frames from the output ringbuffer to the audio output device
-    void pushAudioToOutput();
-
     bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
@@ -282,6 +295,8 @@ private:
 
     quint64 _lastSentAudioPacket;
     MovingMinMaxAvg<quint64> _packetSentTimeGaps;
+
+    AudioOutputIODevice _audioOutputIODevice;
 };
 
interface/src/Menu.cpp

@@ -606,8 +606,6 @@ Menu::Menu() :
                                   appInstance->getAudio(),
                                   SLOT(toggleStatsShowInjectedStreams()));
 
-    addCheckableActionToQMenuAndActionHash(audioDebugMenu, MenuOption::DisableQAudioOutputOverflowCheck, 0, false);
-
     addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel,
                                   Qt::CTRL | Qt::SHIFT | Qt::Key_V,
                                   this,
interface/src/Menu.h

@@ -353,7 +353,6 @@ namespace MenuOption {
     const QString DisableActivityLogger = "Disable Activity Logger";
     const QString DisableAutoAdjustLOD = "Disable Automatically Adjusting LOD";
     const QString DisableNackPackets = "Disable NACK Packets";
-    const QString DisableQAudioOutputOverflowCheck = "Disable Audio Output Device Overflow Check";
     const QString DisplayFrustum = "Display Frustum";
     const QString DisplayHands = "Display Hands";
     const QString DisplayHandTargets = "Display Hand Targets";
libraries/audio/src/InboundAudioStream.cpp

@@ -70,7 +70,12 @@ void InboundAudioStream::clearBuffer() {
     _currentJitterBufferFrames = 0;
 }
 
+int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
+}
+
 int InboundAudioStream::parseData(const QByteArray& packet) {
 
     PacketType packetType = packetTypeForPacket(packet);
     QUuid senderUUID = uuidFromPacketHeader(packet);
+
@@ -82,7 +87,9 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     // parse sequence number and track it
     quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));
     readBytes += sizeof(quint16);
-    SequenceNumberStats::ArrivalInfo arrivalInfo = frameReceivedUpdateNetworkStats(sequence, senderUUID);
+    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);
+
+    frameReceivedUpdateTimingStats();
 
     // TODO: handle generalized silent packet here?????
 
@@ -130,32 +137,71 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     return readBytes;
 }
 
-bool InboundAudioStream::popFrames(int numFrames, bool starveOnFail) {
-    int numSamplesRequested = numFrames * _ringBuffer.getNumFrameSamples();
+int InboundAudioStream::popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped) {
+    int samplesPopped = 0;
+    int samplesAvailable = _ringBuffer.samplesAvailable();
     if (_isStarved) {
         // we're still refilling; don't pop
         _consecutiveNotMixedCount++;
         _lastPopSucceeded = false;
     } else {
-        if (_ringBuffer.samplesAvailable() >= numSamplesRequested) {
-            // we have enough samples to pop, so we're good to mix
-            _lastPopOutput = _ringBuffer.nextOutput();
-            _ringBuffer.shiftReadPosition(numSamplesRequested);
-            framesAvailableChanged();
-            _hasStarted = true;
-            _lastPopSucceeded = true;
+        if (samplesAvailable >= maxSamples) {
+            // we have enough samples to pop, so we're good to pop
+            popSamplesNoCheck(maxSamples);
+            samplesPopped = maxSamples;
+        } else if (!allOrNothing && samplesAvailable > 0) {
+            // we don't have the requested number of samples, but we do have some
+            // samples available, so pop all those (except in all-or-nothing mode)
+            popSamplesNoCheck(samplesAvailable);
+            samplesPopped = samplesAvailable;
         } else {
-            // we don't have enough samples, so set this stream to starve
-            // if starveOnFail is true
-            if (starveOnFail) {
-                starved();
+            // we can't pop any samples. set this stream to starved if needed
+            if (starveIfNoSamplesPopped) {
+                setToStarved();
                 _consecutiveNotMixedCount++;
             }
             _lastPopSucceeded = false;
         }
     }
-    return _lastPopSucceeded;
+    return samplesPopped;
+}
+
+int InboundAudioStream::popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped) {
+    int framesPopped = 0;
+    int framesAvailable = _ringBuffer.framesAvailable();
+    if (_isStarved) {
+        // we're still refilling; don't pop
+        _consecutiveNotMixedCount++;
+        _lastPopSucceeded = false;
+    } else {
+        if (framesAvailable >= maxFrames) {
+            // we have enough frames to pop, so we're good to pop
+            popSamplesNoCheck(maxFrames * _ringBuffer.getNumFrameSamples());
+            framesPopped = maxFrames;
+        } else if (!allOrNothing && framesAvailable > 0) {
+            // we don't have the requested number of frames, but we do have some
+            // frames available, so pop all those (except in all-or-nothing mode)
+            popSamplesNoCheck(framesAvailable * _ringBuffer.getNumFrameSamples());
+            framesPopped = framesAvailable;
+        } else {
+            // we can't pop any frames. set this stream to starved if needed
+            if (starveIfNoFramesPopped) {
+                setToStarved();
+                _consecutiveNotMixedCount = 1;
+            }
+            _lastPopSucceeded = false;
+        }
+    }
+    return framesPopped;
+}
+
+void InboundAudioStream::popSamplesNoCheck(int samples) {
+    _lastPopOutput = _ringBuffer.nextOutput();
+    _ringBuffer.shiftReadPosition(samples);
+    framesAvailableChanged();
+
+    _hasStarted = true;
+    _lastPopSucceeded = true;
 }
 
 void InboundAudioStream::framesAvailableChanged() {
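Note: the two call sites introduced by this PR exercise the new pop flags
differently:

    // audio mixer (AudioMixerClientData.cpp): exactly one frame, all-or-nothing,
    // and starve the stream when nothing could be popped (defaulted argument):
    int framesPopped = stream->popFrames(1, true);   // returns 0 or 1

    // output device (Audio::AudioOutputIODevice::readData): partial pops allowed,
    // so the device gets whatever is buffered when fewer samples are available:
    int samplesPopped = receivedAudioStream.popSamples(samplesRequested, false);

    // either way, the popped range is then read through getLastPopOutput()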
@@ -168,16 +214,12 @@ void InboundAudioStream::framesAvailableChanged() {
 }
 
 void InboundAudioStream::setToStarved() {
-    starved();
-    if (_ringBuffer.framesAvailable() >= _desiredJitterBufferFrames) {
-        _isStarved = false;
-    }
-}
-
-void InboundAudioStream::starved() {
     _isStarved = true;
     _consecutiveNotMixedCount = 0;
     _starveCount++;
+    // if we have more than the desired frames when setToStarved() is called, then we'll immediately
+    // be considered refilled. in that case, there's no need to set _isStarved to true.
+    _isStarved = (_ringBuffer.framesAvailable() < _desiredJitterBufferFrames);
 }
 
 void InboundAudioStream::setDynamicJitterBuffers(bool dynamicJitterBuffers) {
@@ -204,9 +246,7 @@ int InboundAudioStream::clampDesiredJitterBufferFramesValue(int desired) const {
     return glm::clamp(desired, MIN_FRAMES_DESIRED, MAX_FRAMES_DESIRED);
 }
 
-SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID) {
-    // track the sequence number we received
-    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequenceNumber, senderUUID);
-
+void InboundAudioStream::frameReceivedUpdateTimingStats() {
     // update our timegap stats and desired jitter buffer frames if necessary
     // discard the first few packets we receive since they usually have gaps that aren't represensative of normal jitter
@@ -243,8 +283,6 @@ SequenceNumberStats::ArrivalInfo InboundAudioStream::frameReceivedUpdateNetworkS
         }
     }
     _lastFrameReceivedTime = now;
-
-    return arrivalInfo;
 }
 
 int InboundAudioStream::writeDroppableSilentSamples(int numSilentSamples) {
libraries/audio/src/InboundAudioStream.h

@@ -63,8 +63,8 @@ public:
 
     virtual int parseData(const QByteArray& packet);
 
-    bool popFrames(int numFrames, bool starveOnFail = true);
+    int popFrames(int maxFrames, bool allOrNothing, bool starveIfNoFramesPopped = true);
+    int popSamples(int maxSamples, bool allOrNothing, bool starveIfNoSamplesPopped = true);
 
     bool lastPopSucceeded() const { return _lastPopSucceeded; };
     const AudioRingBuffer::ConstIterator& getLastPopOutput() const { return _lastPopOutput; }
@@ -111,13 +111,12 @@ public:
     int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
 
 private:
-    void starved();
+    void frameReceivedUpdateTimingStats();
 
-    SequenceNumberStats::ArrivalInfo frameReceivedUpdateNetworkStats(quint16 sequenceNumber, const QUuid& senderUUID);
     int clampDesiredJitterBufferFramesValue(int desired) const;
 
     int writeSamplesForDroppedPackets(int numSamples);
 
+    void popSamplesNoCheck(int samples);
     void framesAvailableChanged();
 
 protected:
@@ -126,11 +125,12 @@ protected:
     InboundAudioStream& operator= (const InboundAudioStream&);
 
     /// parses the info between the seq num and the audio data in the network packet and calculates
-    /// how many audio samples this packet contains
+    /// how many audio samples this packet contains (used when filling in samples for dropped packets).
     virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
 
-    /// parses the audio data in the network packet
-    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
+    /// parses the audio data in the network packet.
+    /// default implementation assumes packet contains raw audio samples after stream properties
+    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 
     int writeDroppableSilentSamples(int numSilentSamples);
 
libraries/audio/src/InjectedAudioStream.cpp

@@ -58,10 +58,6 @@ int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray
     return packetStream.device()->pos();
 }
 
-int InjectedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-}
-
 AudioStreamStats InjectedAudioStream::getAudioStreamStats() const {
     AudioStreamStats streamStats = PositionalAudioStream::getAudioStreamStats();
     streamStats._streamIdentifier = _streamIdentifier;
libraries/audio/src/InjectedAudioStream.h

@@ -32,7 +32,6 @@ private:
 
     AudioStreamStats getAudioStreamStats() const;
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 
     const QUuid _streamIdentifier;
     float _radius;
libraries/audio/src/MixedAudioStream.cpp

@@ -1,3 +1,13 @@
+//
+//  MixedAudioStream.cpp
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 8/4/14.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
 #include "MixedAudioStream.h"
 
@@ -11,7 +21,3 @@ int MixedAudioStream::parseStreamProperties(PacketType type, const QByteArray& p
     numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
     return 0;
 }
-
-int MixedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
-    return _ringBuffer.writeData(packetAfterStreamProperties.data(), numAudioSamples * sizeof(int16_t));
-}
libraries/audio/src/MixedAudioStream.h

@@ -2,7 +2,7 @@
 //  MixedAudioStream.h
 //  libraries/audio/src
 //
-//  Created by Stephen Birarda on 6/5/13.
+//  Created by Yixin Wang on 8/4/14.
 //  Copyright 2013 High Fidelity, Inc.
 //
 //  Distributed under the Apache License, Version 2.0.
@@ -23,7 +23,6 @@ public:
 
 protected:
     int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
-    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
 };
 
 #endif // hifi_MixedAudioStream_h
libraries/audio/src/MixedProcessedAudioStream.cpp (new file, 45 lines)

@@ -0,0 +1,45 @@
+//
+//  MixedProcessedAudioStream.cpp
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 8/4/14.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#include "MixedProcessedAudioStream.h"
+
+MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc)
+    : InboundAudioStream(numFrameSamples, numFramesCapacity, dynamicJitterBuffers, staticDesiredJitterBufferFrames, maxFramesOverDesired, useStDevForJitterCalc)
+{
+}
+
+void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
+    _outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
+    int deviceOutputFrameSize = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * _outputFormatChannelsTimesSampleRate / SAMPLE_RATE;
+    _ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
+}
+
+int MixedProcessedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    // mixed audio packets do not have any info between the seq num and the audio data.
+    int numNetworkSamples = packetAfterSeqNum.size() / sizeof(int16_t);
+
+    // since numAudioSamples is used to know how many samples to add for each dropped packet before this one,
+    // we want to set it to the number of device audio samples since this stream contains device audio samples, not network samples.
+    const int STEREO_DIVIDER = 2;
+    numAudioSamples = numNetworkSamples * _outputFormatChannelsTimesSampleRate / (STEREO_DIVIDER * SAMPLE_RATE);
+
+    return 0;
+}
+
+int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
+
+    QByteArray outputBuffer;
+    emit processSamples(packetAfterStreamProperties, outputBuffer);
+
+    _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
+
+    return packetAfterStreamProperties.size();
+}
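Note: parseAudioData() is where the pull-mode design closes the loop. Unlike the
other stream subclasses, which write raw network samples into the ring buffer,
this one emits processSamples (handled synchronously via Qt::DirectConnection,
see the constructor in Audio.cpp) so the ring buffer stores already-resampled,
device-format audio:

    // per inbound packet (names from this diff):
    //   parseAudioData(packetAfterStreamProperties, ...)
    //     emit processSamples(packetAfterStreamProperties, outputBuffer);
    //     _ringBuffer.writeData(outputBuffer.data(), outputBuffer.size());
    //
    // per device callback:
    //   AudioOutputIODevice::readData() -> popSamples() -> readSamples()
    //   copies those device-format samples straight out, no further conversion

This is also why outputFormatChanged() resizes the ring buffer: its frame size is
now measured in device samples, not network samples.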
libraries/audio/src/MixedProcessedAudioStream.h (new file, 37 lines)

@@ -0,0 +1,37 @@
+//
+//  MixedProcessedAudioStream.h
+//  libraries/audio/src
+//
+//  Created by Yixin Wang on 8/4/14.
+//  Copyright 2013 High Fidelity, Inc.
+//
+//  Distributed under the Apache License, Version 2.0.
+//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
+//
+
+#ifndef hifi_MixedProcessedAudioStream_h
+#define hifi_MixedProcessedAudioStream_h
+
+#include "InboundAudioStream.h"
+
+class MixedProcessedAudioStream : public InboundAudioStream {
+    Q_OBJECT
+public:
+    MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, bool dynamicJitterBuffers, int staticDesiredJitterBufferFrames, int maxFramesOverDesired, bool useStDevForJitterCalc);
+
+signals:
+
+    void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
+
+public:
+    void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
+
+protected:
+    int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples);
+    int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples);
+
+private:
+    int _outputFormatChannelsTimesSampleRate;
+};
+
+#endif // hifi_MixedProcessedAudioStream_h