drive input from buffer callback and output from network

Stephen Birarda 2013-12-16 11:58:23 -08:00
parent 25b7065298
commit 1f9ca00317
9 changed files with 270 additions and 231 deletions

View file

@@ -54,7 +54,7 @@
 const short JITTER_BUFFER_MSECS = 12;
 const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_MSECS * (SAMPLE_RATE / 1000.0);
-const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE) * 1000 * 1000);
+const unsigned int BUFFER_SEND_INTERVAL_USECS = floorf((NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float) SAMPLE_RATE) * 1000 * 1000);
 const int MAX_SAMPLE_VALUE = std::numeric_limits<int16_t>::max();
 const int MIN_SAMPLE_VALUE = std::numeric_limits<int16_t>::min();
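For reference, the rename does not change the send cadence. With NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = 256 and SAMPLE_RATE = 24000 (both defined in AudioRingBuffer.h later in this commit), the interval works out to the same value as before:

    // Worked example of the BUFFER_SEND_INTERVAL_USECS arithmetic above,
    // using the constants from AudioRingBuffer.h in this commit.
    #include <math.h>
    #include <stdio.h>

    int main() {
        const int samplesPerChannel = 256;  // NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
        const int sampleRate = 24000;       // SAMPLE_RATE
        unsigned int intervalUsecs = floorf((samplesPerChannel / (float) sampleRate) * 1000 * 1000);
        printf("%u\n", intervalUsecs);      // prints 10666 -- one frame is ~10.67 ms
        return 0;
    }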
@@ -164,27 +164,29 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
     int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
     int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
-    for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2; s += 2) {
-        if (s < numSamplesDelay) {
-            // pull the earlier sample for the delayed channel
-            int earlierSample = (*bufferToAdd)[(s / 2) - numSamplesDelay] * attenuationCoefficient * weakChannelAmplitudeRatio;
-            _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
-                                                                  MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-        }
-        // pull the current sample for the good channel
-        int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
-        _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
-                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-        if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
-            // place the curernt sample at the right spot in the delayed channel
-            int16_t clampedSample = glm::clamp((int) (_clientSamples[s + numSamplesDelay + delayedChannelOffset]
-                                                      + (currentSample * weakChannelAmplitudeRatio)),
-                                               MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-            _clientSamples[s + numSamplesDelay + delayedChannelOffset] = clampedSample;
-        }
+    for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 2) {
+        // if (s < numSamplesDelay) {
+        //     // pull the earlier sample for the delayed channel
+        //     int earlierSample = (*bufferToAdd)[(s / 2) - numSamplesDelay] * attenuationCoefficient * weakChannelAmplitudeRatio;
+        //
+        //     _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
+        //                                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+        // }
+        //
+        // // pull the current sample for the good channel
+        // int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
+        // _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
+        //                                                    MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+        //
+        // if (s + numSamplesDelay < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO) {
+        //     // place the curernt sample at the right spot in the delayed channel
+        //     int16_t clampedSample = glm::clamp((int) (_clientSamples[s + numSamplesDelay + delayedChannelOffset]
+        //                                               + (currentSample * weakChannelAmplitudeRatio)),
+        //                                        MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+        //     _clientSamples[s + numSamplesDelay + delayedChannelOffset] = clampedSample;
+        // }
+        _clientSamples[s] = _clientSamples[s + 1] = (*bufferToAdd)[s / 2] * attenuationCoefficient;
     }
 }
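The block this hunk comments out implemented simple pan-with-delay spatialization: the channel facing the source gets each attenuated sample immediately, and the far channel gets a quieter copy a few samples later; the commit temporarily replaces it with a plain dual-mono copy. A standalone sketch of the disabled idea, with my own simplified frame indexing rather than the project's exact arithmetic:

    #include <algorithm>
    #include <cstdint>

    // Mix one mono frame into an interleaved stereo accumulation buffer.
    // goodChannelOffset selects the louder channel (0 = left, 1 = right); the
    // other channel receives the sample delayFrames later, scaled by weakRatio.
    // All parameter names here are illustrative stand-ins.
    void mixWithPanDelay(const int16_t* mono, int numFrames, int16_t* stereo,
                         int goodChannelOffset, int delayFrames,
                         float attenuation, float weakRatio) {
        int delayedChannelOffset = 1 - goodChannelOffset;
        for (int f = 0; f < numFrames; f++) {
            int current = (int) (mono[f] * attenuation);

            // the near channel gets the sample now, clamped into int16_t range
            int goodIndex = 2 * f + goodChannelOffset;
            stereo[goodIndex] = (int16_t) std::min(32767, std::max(-32768, stereo[goodIndex] + current));

            // the far channel gets a weaker copy delayFrames later
            if (f + delayFrames < numFrames) {
                int delayedIndex = 2 * (f + delayFrames) + delayedChannelOffset;
                int weak = stereo[delayedIndex] + (int) (current * weakRatio);
                stereo[delayedIndex] = (int16_t) std::min(32767, std::max(-32768, weak));
            }
        }
    }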
@@ -277,7 +279,7 @@ void AudioMixer::run() {
     gettimeofday(&startTime, NULL);
     int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MIXED_AUDIO);
-    unsigned char clientPacket[BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
+    unsigned char clientPacket[NETWORK_BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
     populateTypeAndVersion(clientPacket, PACKET_TYPE_MIXED_AUDIO);
     while (!_isFinished) {

View file

@@ -35,7 +35,7 @@ private:
     void prepareMixForListeningNode(Node* node);
-    int16_t _clientSamples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2];
+    int16_t _clientSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
 };
 #endif /* defined(__hifi__AudioMixer__) */

View file

@@ -91,7 +91,7 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
     PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
     if (audioBuffer->willBeAddedToMix()) {
-        audioBuffer->shiftReadPosition(BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+        audioBuffer->shiftReadPosition(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
         audioBuffer->setWillBeAddedToMix(false);
     } else if (audioBuffer->isStarved()) {

View file

@@ -13,6 +13,7 @@
 #include <CoreAudio/AudioHardware.h>
 #endif
+#include <QtCore/QBuffer>
 #include <QtMultimedia/QAudioInput>
 #include <QtMultimedia/QAudioOutput>
 #include <QSvgRenderer>
@@ -33,7 +34,7 @@
 static const float JITTER_BUFFER_LENGTH_MSECS = 12;
 static const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_LENGTH_MSECS * NUM_AUDIO_CHANNELS * (SAMPLE_RATE / 1000.0);
-static const float AUDIO_CALLBACK_MSECS = (float)BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
+static const float AUDIO_CALLBACK_MSECS = (float) NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL / (float)SAMPLE_RATE * 1000.0;
 // Mute icon configration
 static const int ICON_SIZE = 24;
@@ -45,17 +46,15 @@ Audio::Audio(Oscilloscope* scope, int16_t initialJitterBufferSamples, QObject* p
     _audioInput(NULL),
     _desiredInputFormat(),
     _inputFormat(),
-    _inputDevice(NULL),
     _inputBuffer(),
     _numInputCallbackBytes(0),
     _audioOutput(NULL),
     _desiredOutputFormat(),
     _outputFormat(),
     _outputDevice(NULL),
-    _outputBuffer(),
     _numOutputCallbackBytes(0),
-    _nextOutputSamples(NULL),
-    _ringBuffer(true),
+    _inputRingBuffer(0),
+    _ringBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2),
     _scope(scope),
     _averagedLatency(0.0),
     _measuredJitter(0),
@@ -249,7 +248,7 @@ void Audio::start() {
     qDebug() << "The format to be used for audio input is" << _inputFormat << "\n";
     _audioInput = new QAudioInput(inputDeviceInfo, _inputFormat, this);
-    _numInputCallbackBytes = BUFFER_LENGTH_BYTES_PER_CHANNEL * _inputFormat.channelCount()
+    _numInputCallbackBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL * _inputFormat.channelCount()
         * (_inputFormat.sampleRate() / SAMPLE_RATE)
         / CALLBACK_ACCELERATOR_RATIO;
     _audioInput->setBufferSize(_numInputCallbackBytes);
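To make the sizing concrete, a hypothetical example; the 48 kHz mono device and the CALLBACK_ACCELERATOR_RATIO value of 2 are assumptions, not taken from this commit:

    #include <stdio.h>

    // Hypothetical input-callback sizing, following the formula above.
    int main() {
        const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512; // from AudioRingBuffer.h
        const int SAMPLE_RATE = 24000;                           // from AudioRingBuffer.h
        const int CALLBACK_ACCELERATOR_RATIO = 2;                // assumed value
        int channelCount = 1;                                    // assumed mono device
        int deviceSampleRate = 48000;                            // assumed device rate

        int numInputCallbackBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL * channelCount
            * (deviceSampleRate / SAMPLE_RATE)
            / CALLBACK_ACCELERATOR_RATIO;
        printf("%d\n", numInputCallbackBytes); // 512 * 1 * 2 / 2 = 512 bytes per callback
        return 0;
    }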
@@ -261,14 +260,11 @@ void Audio::start() {
     if (adjustedFormatForAudioDevice(outputDeviceInfo, _desiredOutputFormat, _outputFormat)) {
         qDebug() << "The format to be used for audio output is" << _outputFormat << "\n";
+        _inputRingBuffer.resizeForFrameSize(_numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO / sizeof(int16_t));
         _inputDevice = _audioInput->start();
-        connect(_inputDevice, SIGNAL(readyRead()), SLOT(handleAudioInput()));
+        connect(_inputDevice, SIGNAL(readyRead()), this, SLOT(handleAudioInput()));
         _audioOutput = new QAudioOutput(outputDeviceInfo, _outputFormat, this);
-        _numOutputCallbackBytes = BUFFER_LENGTH_BYTES_PER_CHANNEL * _outputFormat.channelCount()
-            * (_outputFormat.sampleRate() / SAMPLE_RATE)
-            / CALLBACK_ACCELERATOR_RATIO;
-        _audioOutput->setBufferSize(_numOutputCallbackBytes);
         _outputDevice = _audioOutput->start();
         gettimeofday(&_lastReceiveTime, NULL);
@@ -281,50 +277,66 @@ void Audio::start() {
 }

 void Audio::handleAudioInput() {
     static char monoAudioDataPacket[MAX_PACKET_SIZE];

     static int numBytesPacketHeader = numBytesForPacketHeader((unsigned char*) &PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO);
     static int leadingBytes = numBytesPacketHeader + sizeof(glm::vec3) + sizeof(glm::quat) + NUM_BYTES_RFC4122_UUID;

     static int16_t* monoAudioSamples = (int16_t*) (monoAudioDataPacket + leadingBytes);

-    static float inputToOutputRatio = _numOutputCallbackBytes / _numInputCallbackBytes;
-    static float inputToNetworkInputRatio = _numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO / BUFFER_LENGTH_BYTES_PER_CHANNEL;
+    static float inputToNetworkInputRatio = _numInputCallbackBytes * CALLBACK_ACCELERATOR_RATIO
+        / NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
+    static int inputSamplesRequired = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL * inputToNetworkInputRatio;

     QByteArray inputByteArray = _inputDevice->readAll();
-    int numResampledNetworkInputBytes = inputByteArray.size() / inputToNetworkInputRatio;
-    int numResampledNetworkInputSamples = numResampledNetworkInputBytes / sizeof(int16_t);
+    _inputRingBuffer.writeData(inputByteArray.data(), inputByteArray.size());

-    // zero out the monoAudioSamples array
-    memset(monoAudioSamples, 0, numResampledNetworkInputBytes);
+    while (_inputRingBuffer.samplesAvailable() > inputSamplesRequired) {

-    if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio) && !_muted) {
-        _outputBuffer.resize(inputByteArray.size());
-        // if local loopback enabled, copy input to output
-        linearResampling((int16_t*) inputByteArray.data(), (int16_t*) _outputBuffer.data(),
-                         inputByteArray.size() / sizeof(int16_t),
-                         inputByteArray.size() * inputToOutputRatio / sizeof(int16_t),
-                         _inputFormat, _outputFormat);
-    } else {
-        _outputBuffer.fill(0, inputByteArray.size());
-    }
+        int16_t inputAudioSamples[inputSamplesRequired];
+        _inputRingBuffer.readSamples(inputAudioSamples, inputSamplesRequired);

-    // add input data just written to the scope
-    // QMetaObject::invokeMethod(_scope, "addStereoSamples", Qt::QueuedConnection,
-    //                           Q_ARG(QByteArray, inputByteArray), Q_ARG(bool, true));
+        // zero out the monoAudioSamples array
+        memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);

-    // add procedural effects to the appropriate input samples
-    // addProceduralSounds(monoAudioSamples + (_isBufferSendCallback
-    //                     ? BUFFER_LENGTH_SAMPLES_PER_CHANNEL / CALLBACK_ACCELERATOR_RATIO : 0),
-    //                     (int16_t*) stereoOutputBuffer.data(),
-    //                     BUFFER_LENGTH_SAMPLES_PER_CHANNEL / CALLBACK_ACCELERATOR_RATIO);
+        if (!_muted) {
+            // we aren't muted, downsample the input audio
+            linearResampling((int16_t*) inputAudioSamples,
+                             monoAudioSamples,
+                             inputSamplesRequired,
+                             NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
+                             _inputFormat, _desiredInputFormat);

-    NodeList* nodeList = NodeList::getInstance();
-    Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);
+            // add input data just written to the scope
+            // QMetaObject::invokeMethod(_scope, "addStereoSamples", Qt::QueuedConnection,
+            //                           Q_ARG(QByteArray, inputByteArray), Q_ARG(bool, true));
+        }

-    if (false) {
-        if (audioMixer->getActiveSocket()) {
+        // if (Menu::getInstance()->isOptionChecked(MenuOption::EchoLocalAudio)) {
+        //     // if local loopback enabled, copy input to output
+        //     QByteArray samplesForOutput;
+        //     samplesForOutput.resize(inputSamplesRequired * outputToInputRatio * sizeof(int16_t));
+        //
+        //     linearResampling(monoAudioSamples, (int16_t*) samplesForOutput.data(),
+        //                      NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL,
+        //                      inputSamplesRequired,
+        //                      _desiredInputFormat, _outputFormat);
+        //
+        //     _outputDevice->write(samplesForOutput);
+        // }

+        // add procedural effects to the appropriate input samples
+        // addProceduralSounds(monoAudioSamples + (_isBufferSendCallback
+        //                     ? BUFFER_LENGTH_SAMPLES_PER_CHANNEL / CALLBACK_ACCELERATOR_RATIO : 0),
+        //                     (int16_t*) stereoOutputBuffer.data(),
+        //                     BUFFER_LENGTH_SAMPLES_PER_CHANNEL / CALLBACK_ACCELERATOR_RATIO);

+        NodeList* nodeList = NodeList::getInstance();
+        Node* audioMixer = nodeList->soloNodeOfType(NODE_TYPE_AUDIO_MIXER);

+        if (audioMixer && nodeList->getNodeActiveSocketOrPing(audioMixer)) {
             MyAvatar* interfaceAvatar = Application::getInstance()->getAvatar();
             glm::vec3 headPosition = interfaceAvatar->getHeadJointPosition();
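This restructuring is the core of the commit: the readyRead callback no longer drives playback, it only appends raw device samples to _inputRingBuffer, and complete network-sized frames are peeled off in a loop. A minimal sketch of that accumulate-and-drain pattern, with std::deque standing in for AudioRingBuffer (an assumption for brevity):

    #include <cstdint>
    #include <deque>
    #include <vector>

    std::deque<int16_t> inputRing;  // stand-in for _inputRingBuffer

    void onReadyRead(const std::vector<int16_t>& callbackSamples, size_t inputSamplesRequired) {
        // the device hands over whatever it has; sizes need not align with network frames
        inputRing.insert(inputRing.end(), callbackSamples.begin(), callbackSamples.end());

        // drain one network frame at a time, exactly like the new while loop above
        while (inputRing.size() > inputSamplesRequired) {
            std::vector<int16_t> frame(inputRing.begin(), inputRing.begin() + inputSamplesRequired);
            inputRing.erase(inputRing.begin(), inputRing.begin() + inputSamplesRequired);
            // ... downsample frame to NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL and packetize ...
        }
    }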
@@ -334,7 +346,7 @@ void Audio::handleAudioInput() {
             // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
             PACKET_TYPE packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
                 ? PACKET_TYPE_MICROPHONE_AUDIO_WITH_ECHO : PACKET_TYPE_MICROPHONE_AUDIO_NO_ECHO;

             char* currentPacketPtr = monoAudioDataPacket + populateTypeAndVersion((unsigned char*) monoAudioDataPacket,
                                                                                   packetType);
@@ -357,94 +369,19 @@ void Audio::handleAudioInput() {
             // loudness /= BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
             _lastInputLoudness = loudness;
-
-            // we aren't muted - pull our input audio to send off to the mixer
-            linearResampling((int16_t*) inputByteArray.data(),
-                             monoAudioSamples,
-                             inputByteArray.size() / sizeof(int16_t),
-                             numResampledNetworkInputSamples,
-                             _inputFormat, _desiredInputFormat);
         } else {
             _lastInputLoudness = 0;
         }

         nodeList->getNodeSocket().writeDatagram(monoAudioDataPacket,
-                                                numResampledNetworkInputBytes + leadingBytes,
+                                                NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes,
                                                 audioMixer->getActiveSocket()->getAddress(),
                                                 audioMixer->getActiveSocket()->getPort());

         Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
-            .updateValue(BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
-    } else {
-        nodeList->pingPublicAndLocalSocketsForInactiveNode(audioMixer);
+            .updateValue(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
         }
     }
-
-    if (_outputDevice) {
-        int numRequiredNetworkOutputSamples = numResampledNetworkInputSamples
-            * (_desiredOutputFormat.channelCount() / _desiredInputFormat.channelCount());
-
-        int numResampledOutputBytes = _inputBuffer.size() * inputToOutputRatio;
-
-        // linearResampling((int16_t*) inputByteArray.data(),
-        //                  monoAudioSamples,
-        //                  inputByteArray.size() / sizeof(int16_t),
-        //                  numResampledNetworkInputSamples,
-        //                  _inputFormat, _desiredInputFormat);
-
-        // copy the packet from the RB to the output
-        // linearResampling(monoAudioSamples,
-        //                  (int16_t*) _outputBuffer.data(),
-        //                  numResampledNetworkInputSamples,
-        //                  numResampledOutputBytes / sizeof(int16_t),
-        //                  _desiredInputFormat, _outputFormat);
-
-        // if there is anything in the ring buffer, decide what to do
-        if (false) {
-            if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numRequiredNetworkOutputSamples)) {
-                // starved and we don't have enough to start, keep waiting
-                qDebug() << "Buffer is starved and doesn't have enough samples to start. Held back.\n";
-            } else {
-                // We are either already playing back, or we have enough audio to start playing back.
-                if (_ringBuffer.isStarved()) {
-                    _ringBuffer.setIsStarved(false);
-                }
-
-                int numResampledOutputBytes = inputByteArray.size() * inputToOutputRatio;
-
-                // copy the samples we'll resample from the ring buffer - this also
-                // pushes the read pointer of the ring buffer forwards
-                int16_t ringBufferSamples[numRequiredNetworkOutputSamples];
-                _ringBuffer.read(ringBufferSamples, numRequiredNetworkOutputSamples);
-
-                // copy the packet from the RB to the output
-                linearResampling(ringBufferSamples,
-                                 (int16_t*) _outputBuffer.data(),
-                                 numRequiredNetworkOutputSamples,
-                                 numResampledOutputBytes / sizeof(int16_t),
-                                 _desiredOutputFormat, _outputFormat);
-            }
-        } else if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
-            // we don't have any audio data left in the output buffer, and the ring buffer from
-            // the network has nothing in it either - we just starved
-            _ringBuffer.setIsStarved(true);
-            _numFramesDisplayStarve = 10;
-        }
-
-        // add output (@speakers) data just written to the scope
-        // QMetaObject::invokeMethod(_scope, "addStereoSamples", Qt::QueuedConnection,
-        //                           Q_ARG(QByteArray, stereoOutputBuffer), Q_ARG(bool, false));
-
-        _outputDevice->write(_outputBuffer);
-    }
-
-    gettimeofday(&_lastCallbackTime, NULL);
 }

 void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
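Every iteration of the new loop sends exactly one fixed-size frame, so the datagram length is now constant. The leading region, reconstructed from the leadingBytes arithmetic above (sizes only; the serialization order lives in the elided packet-building code):

    // leadingBytes = numBytesPacketHeader           (varies with packet version)
    //              + sizeof(glm::vec3)              12 bytes of head position
    //              + sizeof(glm::quat)              16 bytes of head orientation
    //              + NUM_BYTES_RFC4122_UUID         16 bytes of source UUID
    // payload     = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL
    //                                               512 bytes of mono int16_t samples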
@@ -466,7 +403,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
         _measuredJitter = _stdev.getStDev();
         _stdev.reset();
         // Set jitter buffer to be a multiple of the measured standard deviation
-        const int MAX_JITTER_BUFFER_SAMPLES = RING_BUFFER_LENGTH_SAMPLES / 2;
+        const int MAX_JITTER_BUFFER_SAMPLES = _ringBuffer.getSampleCapacity() / 2;
         const float NUM_STANDARD_DEVIATIONS = 3.f;
         if (Menu::getInstance()->getAudioJitterBufferSamples() == 0) {
             float newJitterBufferSamples = (NUM_STANDARD_DEVIATIONS * _measuredJitter) / 1000.f * SAMPLE_RATE;
@@ -476,8 +413,70 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
     _ringBuffer.parseData((unsigned char*) audioByteArray.data(), audioByteArray.size());

-    Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(PACKET_LENGTH_BYTES
-                                                                                                    + sizeof(PACKET_TYPE));
+    static float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
+        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
+
+    static int numRequiredOutputSamples = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO / networkOutputToOutputRatio;
+
+    int16_t outputBuffer[numRequiredOutputSamples];
+
+    // linearResampling((int16_t*) inputByteArray.data(),
+    //                  monoAudioSamples,
+    //                  inputByteArray.size() / sizeof(int16_t),
+    //                  numResampledNetworkInputSamples,
+    //                  _inputFormat, _desiredInputFormat);
+
+    // copy the packet from the RB to the output
+    // linearResampling(monoAudioSamples,
+    //                  (int16_t*) _outputBuffer.data(),
+    //                  numResampledNetworkInputSamples,
+    //                  numResampledOutputBytes / sizeof(int16_t),
+    //                  _desiredInputFormat, _outputFormat);
+
+    // if there is anything in the ring buffer, decide what to do
+    if (_ringBuffer.samplesAvailable() > 0) {
+        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO
+                                                         + (_jitterBufferSamples * 2))) {
+            // starved and we don't have enough to start, keep waiting
+            qDebug() << "Buffer is starved and doesn't have enough samples to start. Held back.\n";
+        } else {
+            // We are either already playing back, or we have enough audio to start playing back.
+            if (_ringBuffer.isStarved()) {
+                _ringBuffer.setIsStarved(false);
+            }
+
+            // copy the samples we'll resample from the ring buffer - this also
+            // pushes the read pointer of the ring buffer forwards
+            int16_t ringBufferSamples[NETWORK_BUFFER_LENGTH_SAMPLES_STEREO];
+            _ringBuffer.readSamples(ringBufferSamples, NETWORK_BUFFER_LENGTH_SAMPLES_STEREO);
+
+            // copy the packet from the RB to the output
+            linearResampling(ringBufferSamples,
+                             outputBuffer,
+                             NETWORK_BUFFER_LENGTH_SAMPLES_STEREO,
+                             numRequiredOutputSamples,
+                             _desiredOutputFormat, _outputFormat);
+
+            if (_outputDevice) {
+                _outputDevice->write((char*) outputBuffer, numRequiredOutputSamples * sizeof(int16_t));
+            }
+        }
+    } else if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
+        // we don't have any audio data left in the output buffer, and the ring buffer from
+        // the network has nothing in it either - we just starved
+        _ringBuffer.setIsStarved(true);
+        _numFramesDisplayStarve = 10;
+    }
+
+    // add output (@speakers) data just written to the scope
+    // QMetaObject::invokeMethod(_scope, "addStereoSamples", Qt::QueuedConnection,
+    //                           Q_ARG(QByteArray, stereoOutputBuffer), Q_ARG(bool, false));
+
+    Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());

     _lastReceiveTime = currentReceiveTime;
 }
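This is the other half of the restructuring: playback is driven by packet arrival rather than the input callback, gated by the jitter buffer. The admission rule, restated as a small pure function (the free-function form and names are mine; the doubling of _jitterBufferSamples is presumably because the margin is counted per channel while the ring holds interleaved stereo):

    // isNotStarvedOrHasMinimumSamples(n) is (!starved || available >= n), so a
    // starved stream is held back until a full stereo network frame plus the
    // jitter margin has accumulated.
    bool shouldStartPlayback(bool isStarved, unsigned int samplesAvailable, int jitterBufferSamples) {
        const unsigned int NETWORK_BUFFER_LENGTH_SAMPLES_STEREO = 512;  // from AudioRingBuffer.h
        return !isStarved
            || samplesAvailable >= NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + jitterBufferSamples * 2;
    }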
@@ -508,7 +507,7 @@ void Audio::render(int screenWidth, int screenHeight) {
         glVertex2f(currentX, topY);
         glVertex2f(currentX, bottomY);
-        for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES / 2; i++) {
+        for (int i = 0; i < _ringBuffer.getSampleCapacity() / 2; i++) {
             glVertex2f(currentX, halfY);
             glVertex2f(currentX + frameWidth, halfY);
             currentX += frameWidth;

View file

@@ -70,6 +70,7 @@ public slots:
     void reset();

 private:
+    QByteArray firstInputFrame;
     QAudioInput* _audioInput;
     QAudioFormat _desiredInputFormat;
     QAudioFormat _inputFormat;
@@ -80,9 +81,8 @@ private:
     QAudioFormat _desiredOutputFormat;
     QAudioFormat _outputFormat;
     QIODevice* _outputDevice;
-    QByteArray _outputBuffer;
     int _numOutputCallbackBytes;
-    int16_t* _nextOutputSamples;
+    AudioRingBuffer _inputRingBuffer;
     AudioRingBuffer _ringBuffer;
     Oscilloscope* _scope;
     StDev _stdev;

View file

@@ -15,13 +15,23 @@
 #include "AudioRingBuffer.h"

-AudioRingBuffer::AudioRingBuffer(bool isStereo) :
+const short RING_BUFFER_LENGTH_FRAMES = 10;
+
+AudioRingBuffer::AudioRingBuffer(int numFrameSamples) :
     NodeData(NULL),
-    _endOfLastWrite(NULL),
-    _isStarved(true)
+    _sampleCapacity(numFrameSamples * RING_BUFFER_LENGTH_FRAMES),
+    _isStarved(true),
+    _hasStarted(false)
 {
-    _buffer = new int16_t[RING_BUFFER_LENGTH_SAMPLES];
-    _nextOutput = _buffer;
+    if (numFrameSamples) {
+        _buffer = new int16_t[_sampleCapacity];
+        _nextOutput = _buffer;
+        _endOfLastWrite = _buffer;
+    } else {
+        _buffer = NULL;
+        _nextOutput = NULL;
+        _endOfLastWrite = NULL;
+    }
 };

 AudioRingBuffer::~AudioRingBuffer() {
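The zero-size constructor exists so a ring can be declared as a member before the device format is known, then sized via resizeForFrameSize() (below) once the real callback size is negotiated, as Audio.cpp does above. A hedged usage sketch, assuming AudioRingBuffer.h is included and the ratio value is passed in:

    #include <cstdint>
    #include "AudioRingBuffer.h"  // the class this file implements

    void setUpInputRing(AudioRingBuffer& inputRing, int numInputCallbackBytes,
                        int callbackAcceleratorRatio) {
        // The member was constructed as AudioRingBuffer(0): no backing store yet.
        // Size it for the negotiated callback size; capacity becomes
        // RING_BUFFER_LENGTH_FRAMES (10) of these frames.
        inputRing.resizeForFrameSize(numInputCallbackBytes * callbackAcceleratorRatio / sizeof(int16_t));
    }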
@@ -34,20 +44,64 @@ void AudioRingBuffer::reset() {
     _isStarved = true;
 }

-int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
-    int numBytesPacketHeader = numBytesForPacketHeader(sourceBuffer);
-    return parseAudioSamples(sourceBuffer + numBytesPacketHeader, numBytes - numBytesPacketHeader);
+void AudioRingBuffer::resizeForFrameSize(qint64 numFrameSamples) {
+    delete[] _buffer;
+    _sampleCapacity = numFrameSamples * RING_BUFFER_LENGTH_FRAMES;
+    _buffer = new int16_t[_sampleCapacity];
+    _nextOutput = _buffer;
+    _endOfLastWrite = _buffer;
 }

-int AudioRingBuffer::parseAudioSamples(unsigned char* sourceBuffer, int numBytes) {
+int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
+    int numBytesPacketHeader = numBytesForPacketHeader(sourceBuffer);
+    return writeData((char*) sourceBuffer + numBytesPacketHeader, numBytes - numBytesPacketHeader);
+}
+
+qint64 AudioRingBuffer::readSamples(int16_t* destination, qint64 maxSamples) {
+    return readData((char*) destination, maxSamples * sizeof(int16_t));
+}
+
+qint64 AudioRingBuffer::readData(char *data, qint64 maxSize) {
+
+    // only copy up to the number of samples we have available
+    int numReadSamples = std::min((unsigned) (maxSize / sizeof(int16_t)), samplesAvailable());
+
+    if (_nextOutput + numReadSamples > _buffer + _sampleCapacity) {
+        // we're going to need to do two reads to get this data, it wraps around the edge
+
+        // read to the end of the buffer
+        int numSamplesToEnd = (_buffer + _sampleCapacity) - _nextOutput;
+        memcpy(data, _nextOutput, numSamplesToEnd * sizeof(int16_t));
+
+        // read the rest from the beginning of the buffer
+        memcpy(data + numSamplesToEnd, _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
+    } else {
+        // read the data
+        memcpy(data, _nextOutput, numReadSamples * sizeof(int16_t));
+    }
+
+    // push the position of _nextOutput by the number of samples read
+    _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
+
+    return numReadSamples * sizeof(int16_t);
+}
+
+qint64 AudioRingBuffer::writeSamples(const int16_t* source, qint64 maxSamples) {
+    return writeData((const char*) source, maxSamples * sizeof(int16_t));
+}
+
+qint64 AudioRingBuffer::writeData(const char* data, qint64 maxSize) {
     // make sure we have enough bytes left for this to be the right amount of audio
     // otherwise we should not copy that data, and leave the buffer pointers where they are
-    int samplesToCopy = numBytes / sizeof(int16_t);
+    int samplesToCopy = std::min(maxSize / sizeof(int16_t), (quint64) _sampleCapacity);

-    if (!_endOfLastWrite) {
-        _endOfLastWrite = _buffer;
-    } else if (samplesToCopy > RING_BUFFER_LENGTH_SAMPLES - samplesAvailable()) {
+    std::less<int16_t*> less;
+    std::less_equal<int16_t*> lessEqual;
+
+    if (_hasStarted
+        && (less(_endOfLastWrite, _nextOutput)
+            && lessEqual(_nextOutput, shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy)))) {
         // this read will cross the next output, so call us starved and reset the buffer
         qDebug() << "Filled the ring buffer. Resetting.\n";
         _endOfLastWrite = _buffer;
@@ -55,49 +109,28 @@ int AudioRingBuffer::parseAudioSamples(unsigned char* sourceBuffer, int numBytes
         _isStarved = true;
     }

-    if (_endOfLastWrite + samplesToCopy <= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
-        memcpy(_endOfLastWrite, sourceBuffer, numBytes);
+    _hasStarted = true;
+
+    if (_endOfLastWrite + samplesToCopy <= _buffer + _sampleCapacity) {
+        memcpy(_endOfLastWrite, data, samplesToCopy * sizeof(int16_t));
     } else {
-        int numSamplesToEnd = (_buffer + RING_BUFFER_LENGTH_SAMPLES) - _endOfLastWrite;
-        memcpy(_endOfLastWrite, sourceBuffer, numSamplesToEnd * sizeof(int16_t));
-        memcpy(_buffer, sourceBuffer + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
+        int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
+        memcpy(_endOfLastWrite, data, numSamplesToEnd * sizeof(int16_t));
+        memcpy(_buffer, data + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
     }

     _endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy);

-    return numBytes;
+    return samplesToCopy * sizeof(int16_t);
 }

 int16_t& AudioRingBuffer::operator[](const int index) {
     // make sure this is a valid index
-    assert(index > -RING_BUFFER_LENGTH_SAMPLES && index < RING_BUFFER_LENGTH_SAMPLES);
+    assert(index > -_sampleCapacity && index < _sampleCapacity);

     return *shiftedPositionAccomodatingWrap(_nextOutput, index);
 }

-void AudioRingBuffer::read(int16_t* destination, unsigned int maxSamples) {
-
-    // only copy up to the number of samples we have available
-    int numReadSamples = std::min(maxSamples, samplesAvailable());
-
-    if (_nextOutput + numReadSamples > _buffer + RING_BUFFER_LENGTH_SAMPLES) {
-        // we're going to need to do two reads to get this data, it wraps around the edge
-
-        // read to the end of the buffer
-        int numSamplesToEnd = (_buffer + RING_BUFFER_LENGTH_SAMPLES) - _nextOutput;
-        memcpy(destination, _nextOutput, numSamplesToEnd * sizeof(int16_t));
-
-        // read the rest from the beginning of the buffer
-        memcpy(destination + numSamplesToEnd, _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
-    } else {
-        // read the data
-        memcpy(destination, _nextOutput, numReadSamples * sizeof(int16_t));
-    }
-
-    // push the position of _nextOutput by the number of samples read
-    _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
-}
-
 void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
     _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
 }
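The two-memcpy wrap-around that readData() and writeData() share is easiest to see with a tiny ring. A toy demonstration, capacity 8, read pointer 3 samples from the end, 5-sample read:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        int16_t buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        int16_t* nextOutput = buffer + 5;  // read position, 3 samples before the end
        int16_t out[5];
        int numReadSamples = 5;

        // first copy: read to the end of the buffer (samples 5, 6, 7)
        int numSamplesToEnd = (buffer + 8) - nextOutput;
        memcpy(out, nextOutput, numSamplesToEnd * sizeof(int16_t));

        // second copy: wrap to the beginning for the remainder (samples 0, 1)
        memcpy(out + numSamplesToEnd, buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));

        assert(out[0] == 5 && out[2] == 7 && out[3] == 0 && out[4] == 1);
        return 0;
    }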
@@ -109,7 +142,7 @@ unsigned int AudioRingBuffer::samplesAvailable() const {
     int sampleDifference = _endOfLastWrite - _nextOutput;

     if (sampleDifference < 0) {
-        sampleDifference += RING_BUFFER_LENGTH_SAMPLES;
+        sampleDifference += _sampleCapacity;
     }

     return sampleDifference;
@@ -126,12 +159,12 @@ bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(unsigned int numRequiredSa
 int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {

-    if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
+    if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + _sampleCapacity) {
         // this shift will wrap the position around to the beginning of the ring
-        return position + numSamplesShift - RING_BUFFER_LENGTH_SAMPLES;
+        return position + numSamplesShift - _sampleCapacity;
     } else if (numSamplesShift < 0 && position + numSamplesShift < _buffer) {
         // this shift will go around to the end of the ring
-        return position + numSamplesShift - RING_BUFFER_LENGTH_SAMPLES;
+        return position + numSamplesShift - _sampleCapacity;
     } else {
         return position + numSamplesShift;
     }

View file

@@ -10,35 +10,41 @@
 #define __interface__AudioRingBuffer__

 #include <stdint.h>
-#include <map>

 #include <glm/glm.hpp>

+#include <QtCore/QIODevice>
+
 #include "NodeData.h"

 const int SAMPLE_RATE = 24000;

-const int BUFFER_LENGTH_BYTES_STEREO = 1024;
-const int BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
-const int BUFFER_LENGTH_SAMPLES_PER_CHANNEL = BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);
-
-const short RING_BUFFER_LENGTH_FRAMES = 20;
-const short RING_BUFFER_LENGTH_SAMPLES = RING_BUFFER_LENGTH_FRAMES * BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+const int NETWORK_BUFFER_LENGTH_BYTES_STEREO = 1024;
+const int NETWORK_BUFFER_LENGTH_SAMPLES_STEREO = NETWORK_BUFFER_LENGTH_BYTES_STEREO / sizeof(int16_t);
+const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;
+const int NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL / sizeof(int16_t);

 class AudioRingBuffer : public NodeData {
+    Q_OBJECT
 public:
-    AudioRingBuffer(bool isStereo);
+    AudioRingBuffer(int numFrameSamples);
     ~AudioRingBuffer();

     void reset();
+    void resizeForFrameSize(qint64 numFrameSamples);
+
+    int getSampleCapacity() const { return _sampleCapacity; }

     int parseData(unsigned char* sourceBuffer, int numBytes);
-    int parseAudioSamples(unsigned char* sourceBuffer, int numBytes);
+
+    qint64 readSamples(int16_t* destination, qint64 maxSamples);
+    qint64 writeSamples(const int16_t* source, qint64 maxSamples);
+
+    qint64 readData(char* data, qint64 maxSize);
+    qint64 writeData(const char* data, qint64 maxSize);

     int16_t& operator[](const int index);
-    void read(int16_t* destination, unsigned int numSamples);

     void shiftReadPosition(unsigned int numSamples);

     unsigned int samplesAvailable() const;
@@ -54,10 +60,12 @@ protected:
     int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const;

+    int _sampleCapacity;
     int16_t* _nextOutput;
     int16_t* _endOfLastWrite;
     int16_t* _buffer;
     bool _isStarved;
+    bool _hasStarted;
 };

 #endif /* defined(__interface__AudioRingBuffer__) */
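Spelling out the renamed constants and the new capacity rule (the 10-frame multiplier comes from RING_BUFFER_LENGTH_FRAMES in AudioRingBuffer.cpp above; the old header's fixed 20-frame capacity is gone):

    #include <cstdint>

    // Values follow directly from the definitions above.
    static_assert(1024 / sizeof(int16_t) == 512, "stereo frame: 512 samples");
    static_assert(512 / sizeof(int16_t) == 256, "per-channel frame: 256 samples");
    // A PositionalAudioRingBuffer is constructed with numFrameSamples = 256, so
    // its capacity is 10 * 256 = 2560 samples, ~106.7 ms of mono audio at 24 kHz.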

View file

@@ -42,7 +42,7 @@ int InjectedAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes
     unsigned int attenuationByte = *(currentBuffer++);
     _attenuationRatio = attenuationByte / (float) MAX_INJECTOR_VOLUME;

-    currentBuffer += parseAudioSamples(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
+    currentBuffer += writeData((char*) currentBuffer, numBytes - (currentBuffer - sourceBuffer));

     return currentBuffer - sourceBuffer;
 }

View file

@@ -15,7 +15,7 @@
 #include "PositionalAudioRingBuffer.h"

 PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type) :
-    AudioRingBuffer(false),
+    AudioRingBuffer(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL),
     _type(type),
     _position(0.0f, 0.0f, 0.0f),
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
@@ -31,7 +31,7 @@ int PositionalAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numByt
     unsigned char* currentBuffer = sourceBuffer + numBytesForPacketHeader(sourceBuffer);
     currentBuffer += NUM_BYTES_RFC4122_UUID; // the source UUID
     currentBuffer += parsePositionalData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
-    currentBuffer += parseAudioSamples(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
+    currentBuffer += writeData((char*) currentBuffer, numBytes - (currentBuffer - sourceBuffer));

     return currentBuffer - sourceBuffer;
 }
@@ -47,8 +47,7 @@ int PositionalAudioRingBuffer::parsePositionalData(unsigned char* sourceBuffer,
     // if this node sent us a NaN for first float in orientation then don't consider this good audio and bail
     if (std::isnan(_orientation.x)) {
-        _endOfLastWrite = _nextOutput = _buffer;
-        _isStarved = true;
+        reset();
         return 0;
     }
@@ -56,19 +55,17 @@ int PositionalAudioRingBuffer::parsePositionalData(unsigned char* sourceBuffer,
 }

 bool PositionalAudioRingBuffer::shouldBeAddedToMix(int numJitterBufferSamples) {
-    if (_endOfLastWrite) {
-        if (!isNotStarvedOrHasMinimumSamples(BUFFER_LENGTH_SAMPLES_PER_CHANNEL + numJitterBufferSamples)) {
-            qDebug() << "Starved and do not have minimum samples to start. Buffer held back.\n";
-            return false;
-        } else if (samplesAvailable() < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
-            qDebug() << "Do not have number of samples needed for interval. Buffer starved.\n";
-            _isStarved = true;
-            return false;
-        } else {
-            // good buffer, add this to the mix
-            _isStarved = false;
-            return true;
-        }
+    if (!isNotStarvedOrHasMinimumSamples(NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL + numJitterBufferSamples)) {
+        qDebug() << "Starved and do not have minimum samples to start. Buffer held back.\n";
+        return false;
+    } else if (samplesAvailable() < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
+        qDebug() << "Do not have number of samples needed for interval. Buffer starved.\n";
+        _isStarved = true;
+        return false;
+    } else {
+        // good buffer, add this to the mix
+        _isStarved = false;
+        return true;
     }

     return false;
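The mixer-side admission policy now mirrors the client's playback gate: hold a starved stream back until a frame plus the jitter margin is buffered, mark it starved if even one frame is missing, otherwise mix it. Restated as a standalone function (the enum and names are mine):

    enum MixDecision { HELD_BACK, STARVED, ADD_TO_MIX };

    // frameSamples = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL (256 in this commit).
    MixDecision decideMix(bool isStarved, unsigned int samplesAvailable,
                          unsigned int frameSamples, unsigned int jitterSamples) {
        if (isStarved && samplesAvailable < frameSamples + jitterSamples) {
            return HELD_BACK;   // wait for the jitter margin to refill
        } else if (samplesAvailable < frameSamples) {
            return STARVED;     // less than one frame ready; mark starved
        }
        return ADD_TO_MIX;      // enough audio for this mix interval
    }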