cleanup AudioRingBuffer API
commit f17ee1af7a, parent d5aadf6598
6 changed files with 112 additions and 91 deletions
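In short, the commit removes the raw pointer accessors from AudioRingBuffer (getNextOutput()/setNextOutput(), getEndOfLastWrite()/setEndOfLastWrite(), getBuffer(), diffLastWriteNextOutput()) and replaces them with a read-side API: operator[], read(), shiftReadPosition(), samplesAvailable(), and isNotStarvedOrHasMinimumSamples(). A minimal consumer sketch of the new pattern, assuming an AudioRingBuffer instance named ringBuffer and the frame size used elsewhere in this diff; illustrative only, not code from the commit:

    int16_t frame[BUFFER_LENGTH_SAMPLES_PER_CHANNEL];
    if (ringBuffer.samplesAvailable() >= BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
        // read() copies the samples out and advances the internal read position,
        // handling wrap-around so the caller no longer juggles raw pointers
        ringBuffer.read(frame, BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
    }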
@@ -160,27 +160,21 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
         }
     }
     
-    int16_t* sourceBuffer = bufferToAdd->getNextOutput();
-    
     // if the bearing relative angle to source is > 0 then the delayed channel is the right one
     int delayedChannelOffset = (bearingRelativeAngleToSource > 0.0f) ? 1 : 0;
     int goodChannelOffset = delayedChannelOffset == 0 ? 1 : 0;
     
-    int16_t* delaySamplePointer = bufferToAdd->getNextOutput() == bufferToAdd->getBuffer()
-        ? bufferToAdd->getBuffer() + RING_BUFFER_LENGTH_SAMPLES - numSamplesDelay
-        : bufferToAdd->getNextOutput() - numSamplesDelay;
-    
     for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2; s += 2) {
         if (s < numSamplesDelay) {
             // pull the earlier sample for the delayed channel
-            int earlierSample = delaySamplePointer[s / 2] * attenuationCoefficient * weakChannelAmplitudeRatio;
+            int earlierSample = (*bufferToAdd)[(s / 2) - numSamplesDelay] * attenuationCoefficient * weakChannelAmplitudeRatio;
             
             _clientSamples[s + delayedChannelOffset] = glm::clamp(_clientSamples[s + delayedChannelOffset] + earlierSample,
                                                                   MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
         }
         
         // pull the current sample for the good channel
-        int16_t currentSample = sourceBuffer[s / 2] * attenuationCoefficient;
+        int16_t currentSample = (*bufferToAdd)[s / 2] * attenuationCoefficient;
         _clientSamples[s + goodChannelOffset] = glm::clamp(_clientSamples[s + goodChannelOffset] + currentSample,
                                                            MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
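The mixer no longer computes a wrapped delaySamplePointer by hand; the new wrap-aware operator[] indexes relative to the buffer's read position, and a negative index reaches behind it. A hedged illustration using the names from this hunk:

    // index 0 is the sample at the current read position; a negative index reaches
    // back before it, e.g. the delayed stereo channel's earlier sample
    int16_t current = (*bufferToAdd)[0];
    int16_t earlier = (*bufferToAdd)[-numSamplesDelay];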
@@ -90,17 +90,15 @@ void AudioMixerClientData::pushBuffersAfterFrameSend() {
         // this was a used buffer, push the output pointer forwards
         PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
         
         if (audioBuffer->willBeAddedToMix()) {
-            audioBuffer->setNextOutput(audioBuffer->getNextOutput() + BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-            
-            if (audioBuffer->getNextOutput() >= audioBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-                audioBuffer->setNextOutput(audioBuffer->getBuffer());
-            }
+            audioBuffer->shiftReadPosition(BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
             
             audioBuffer->setWillBeAddedToMix(false);
-        } else if (audioBuffer->hasStarted() && audioBuffer->isStarved()) {
-            delete audioBuffer;
-            _ringBuffers.erase(_ringBuffers.begin() + i);
+        } else if (audioBuffer->isStarved()) {
+            // this was previously the kill for injected audio from a client
+            // fix when that is added back
+            // delete audioBuffer;
+            // _ringBuffers.erase(_ringBuffers.begin() + i);
         }
     }
 }
@@ -388,50 +388,43 @@ void Audio::handleAudioInput() {
         }
         
         if (_outputDevice) {
-            // if there is anything in the ring buffer, decide what to do
-            
-            if (_ringBuffer.getEndOfLastWrite()) {
-                if (_ringBuffer.isStarved() && _ringBuffer.diffLastWriteNextOutput() <
-                    ((_outputBuffer.size() / sizeof(int16_t)) + _jitterBufferSamples * (_ringBuffer.isStereo() ? 2 : 1))) {
-                    // If not enough audio has arrived to start playback, keep waiting
-                } else if (!_ringBuffer.isStarved() && _ringBuffer.diffLastWriteNextOutput() == 0) {
-                    // If we have started and now have run out of audio to send to the audio device,
-                    // this means we've starved and should restart.
-                    _ringBuffer.setIsStarved(true);
-                    
-                    // show a starve in the GUI for 10 frames
-                    _numFramesDisplayStarve = 10;
+            
+            // if there is anything in the ring buffer, decide what to do
+            if (_ringBuffer.samplesAvailable() > 0) {
+                
+                int numRequiredNetworkOutputBytes = numResampledNetworkInputBytes
+                    * (_desiredOutputFormat.channelCount() / _desiredInputFormat.channelCount());
+                int numRequiredNetworkOutputSamples = numRequiredNetworkOutputBytes / sizeof(int16_t);
+                
+                if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numRequiredNetworkOutputSamples)) {
+                    // starved and we don't have enough to start, keep waiting
+                    qDebug() << "Buffer is starved and doesn't have enough samples to start. Held back.\n";
                     
                 } else {
                     // We are either already playing back, or we have enough audio to start playing back.
                     if (_ringBuffer.isStarved()) {
                         _ringBuffer.setIsStarved(false);
-                        _ringBuffer.setHasStarted(true);
                     }
                     
-                    int numRequiredNetworkOutputBytes = numResampledNetworkInputBytes * 2;
-                    int numRequiredNetworkOutputSamples = numRequiredNetworkOutputBytes / sizeof(int16_t);
-                    
                     int numResampledOutputBytes = inputByteArray.size() * inputToOutputRatio;
                     
-                    if (_ringBuffer.getNextOutput() + numRequiredNetworkOutputSamples
-                        > _ringBuffer.getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-                        numRequiredNetworkOutputSamples = (_ringBuffer.getBuffer() + RING_BUFFER_LENGTH_SAMPLES) - _ringBuffer.getNextOutput();
-                    }
+                    // copy the samples we'll resample from the ring buffer - this also
+                    // pushes the read pointer of the ring buffer forwards
+                    int16_t ringBufferSamples[numRequiredNetworkOutputSamples];
+                    _ringBuffer.read(ringBufferSamples, numRequiredNetworkOutputSamples);
                     
                     // copy the packet from the RB to the output
-                    linearResampling(_ringBuffer.getNextOutput(),
+                    linearResampling(ringBufferSamples,
                                      (int16_t*) _outputBuffer.data(),
                                      numRequiredNetworkOutputSamples,
                                      numResampledOutputBytes / sizeof(int16_t),
                                      _desiredOutputFormat, _outputFormat);
-                    
-                    _ringBuffer.setNextOutput(_ringBuffer.getNextOutput() + numRequiredNetworkOutputSamples);
-                    
-                    if (_ringBuffer.getNextOutput() >= _ringBuffer.getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-                        _ringBuffer.setNextOutput(_ringBuffer.getBuffer());
-                    }
                 }
+            } else if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
+                // we don't have any audio data left in the output buffer, and the ring buffer from
+                // the network has nothing in it either - we just starved
+                _ringBuffer.setIsStarved(true);
+                _numFramesDisplayStarve = 10;
             }
             
             // add output (@speakers) data just written to the scope
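Note that int16_t ringBufferSamples[numRequiredNetworkOutputSamples] in the hunk above is a variable-length array, a GCC/Clang extension rather than standard C++. A hedged standard-C++ alternative (not what the commit does) would allocate the scratch buffer dynamically:

    #include <vector>
    
    std::vector<int16_t> ringBufferSamples(numRequiredNetworkOutputSamples);
    _ringBuffer.read(ringBufferSamples.data(), numRequiredNetworkOutputSamples);
    // then pass ringBufferSamples.data() to linearResampling() as before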
@@ -471,18 +464,6 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
         }
     }
     
-    // if (_ringBuffer.diffLastWriteNextOutput() + PACKET_LENGTH_SAMPLES >
-    //     PACKET_LENGTH_SAMPLES + (ceilf((float) (_jitterBufferSamples * 2) / PACKET_LENGTH_SAMPLES) * PACKET_LENGTH_SAMPLES)) {
-    //     // this packet would give us more than the required amount for play out
-    //     // discard the first packet in the buffer
-    //
-    //     _ringBuffer.setNextOutput(_ringBuffer.getNextOutput() + PACKET_LENGTH_SAMPLES);
-    //
-    //     if (_ringBuffer.getNextOutput() >= _ringBuffer.getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-    //         _ringBuffer.setNextOutput(_ringBuffer.getBuffer());
-    //     }
-    // }
-    
     _ringBuffer.parseData((unsigned char*) audioByteArray.data(), audioByteArray.size());
     
     Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(PACKET_LENGTH_BYTES
@@ -536,8 +517,7 @@ void Audio::render(int screenWidth, int screenHeight) {
         timeLeftInCurrentBuffer = AUDIO_CALLBACK_MSECS - diffclock(&_lastCallbackTime, &currentTime);
     }
     
-    if (_ringBuffer.getEndOfLastWrite() != NULL)
-        remainingBuffer = _ringBuffer.diffLastWriteNextOutput() / PACKET_LENGTH_SAMPLES * AUDIO_CALLBACK_MSECS;
+    remainingBuffer = PACKET_LENGTH_SAMPLES / PACKET_LENGTH_SAMPLES * AUDIO_CALLBACK_MSECS;
     
     if (_numFramesDisplayStarve == 0) {
         glColor3f(0, 1, 0);
@@ -18,9 +18,7 @@
 AudioRingBuffer::AudioRingBuffer(bool isStereo) :
     NodeData(NULL),
     _endOfLastWrite(NULL),
-    _isStarved(true),
-    _hasStarted(false),
-    _isStereo(isStereo)
+    _isStarved(true)
 {
     _buffer = new int16_t[RING_BUFFER_LENGTH_SAMPLES];
     _nextOutput = _buffer;
@@ -34,7 +32,6 @@ void AudioRingBuffer::reset() {
     _endOfLastWrite = _buffer;
     _nextOutput = _buffer;
     _isStarved = true;
-    _hasStarted = false;
 }
 
 int AudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
@@ -50,24 +47,62 @@ int AudioRingBuffer::parseAudioSamples(unsigned char* sourceBuffer, int numBytes
     
     if (!_endOfLastWrite) {
         _endOfLastWrite = _buffer;
-    } else if (diffLastWriteNextOutput() > RING_BUFFER_LENGTH_SAMPLES - samplesToCopy) {
+    } else if (samplesToCopy > RING_BUFFER_LENGTH_SAMPLES - samplesAvailable()) {
+        // this read will cross the next output, so call us starved and reset the buffer
+        qDebug() << "Filled the ring buffer. Resetting.\n";
         _endOfLastWrite = _buffer;
         _nextOutput = _buffer;
         _isStarved = true;
     }
     
-    memcpy(_endOfLastWrite, sourceBuffer, numBytes);
-    _endOfLastWrite += samplesToCopy;
-    if (_endOfLastWrite >= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
-        _endOfLastWrite = _buffer;
+    if (_endOfLastWrite + samplesToCopy <= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
+        memcpy(_endOfLastWrite, sourceBuffer, numBytes);
+    } else {
+        int numSamplesToEnd = (_buffer + RING_BUFFER_LENGTH_SAMPLES) - _endOfLastWrite;
+        memcpy(_endOfLastWrite, sourceBuffer, numSamplesToEnd * sizeof(int16_t));
+        memcpy(_buffer, sourceBuffer + (numSamplesToEnd * sizeof(int16_t)), (samplesToCopy - numSamplesToEnd) * sizeof(int16_t));
     }
     
+    _endOfLastWrite = shiftedPositionAccomodatingWrap(_endOfLastWrite, samplesToCopy);
+    
     return numBytes;
 }
 
-int AudioRingBuffer::diffLastWriteNextOutput() const {
+int16_t& AudioRingBuffer::operator[](const int index) {
+    // make sure this is a valid index
+    assert(index > -RING_BUFFER_LENGTH_SAMPLES && index < RING_BUFFER_LENGTH_SAMPLES);
+    
+    return *shiftedPositionAccomodatingWrap(_nextOutput, index);
+}
+
+void AudioRingBuffer::read(int16_t* destination, unsigned int maxSamples) {
+    
+    // only copy up to the number of samples we have available
+    int numReadSamples = std::min(maxSamples, samplesAvailable());
+    
+    if (_nextOutput + numReadSamples > _buffer + RING_BUFFER_LENGTH_SAMPLES) {
+        // we're going to need to do two reads to get this data, it wraps around the edge
+        
+        // read to the end of the buffer
+        int numSamplesToEnd = (_buffer + RING_BUFFER_LENGTH_SAMPLES) - _nextOutput;
+        memcpy(destination, _nextOutput, numSamplesToEnd * sizeof(int16_t));
+        
+        // read the rest from the beginning of the buffer
+        memcpy(destination + numSamplesToEnd, _buffer, (numReadSamples - numSamplesToEnd) * sizeof(int16_t));
+    } else {
+        // read the data
+        memcpy(destination, _nextOutput, numReadSamples * sizeof(int16_t));
+    }
+    
+    // push the position of _nextOutput by the number of samples read
+    _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numReadSamples);
+}
+
+void AudioRingBuffer::shiftReadPosition(unsigned int numSamples) {
+    _nextOutput = shiftedPositionAccomodatingWrap(_nextOutput, numSamples);
+}
+
+unsigned int AudioRingBuffer::samplesAvailable() const {
     if (!_endOfLastWrite) {
         return 0;
     } else {
@@ -80,3 +115,24 @@ int AudioRingBuffer::diffLastWriteNextOutput() const {
         return sampleDifference;
     }
 }
+
+bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(unsigned int numRequiredSamples) const {
+    if (!_isStarved) {
+        return true;
+    } else {
+        return samplesAvailable() >= numRequiredSamples;
+    }
+}
+
+int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const {
+    
+    if (numSamplesShift > 0 && position + numSamplesShift >= _buffer + RING_BUFFER_LENGTH_SAMPLES) {
+        // this shift will wrap the position around to the beginning of the ring
+        return position + numSamplesShift - RING_BUFFER_LENGTH_SAMPLES;
+    } else if (numSamplesShift < 0 && position + numSamplesShift < _buffer) {
+        // this shift will go around to the end of the ring
+        return position + numSamplesShift - RING_BUFFER_LENGTH_SAMPLES;
+    } else {
+        return position + numSamplesShift;
+    }
+}
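shiftedPositionAccomodatingWrap() keeps a position inside [_buffer, _buffer + RING_BUFFER_LENGTH_SAMPLES): a forward shift that runs past the end comes back by one ring length. A standalone sketch of the same arithmetic on plain indices, independent of this class (the symmetric backward case adds the capacity back):

    #include <cassert>
    
    // keep an index within [0, capacity) after a signed shift of less than one ring length
    int shiftedIndex(int index, int shift, int capacity) {
        int shifted = index + shift;
        if (shifted >= capacity) {
            shifted -= capacity;   // forward shift wrapped past the end
        } else if (shifted < 0) {
            shifted += capacity;   // backward shift wrapped before the start
        }
        return shifted;
    }
    
    int main() {
        assert(shiftedIndex(9, 3, 10) == 2);    // 9 + 3 wraps to the start of the ring
        assert(shiftedIndex(1, -3, 10) == 8);   // 1 - 3 wraps to the end of the ring
        return 0;
    }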
@@ -34,36 +34,30 @@ public:
     
     int parseData(unsigned char* sourceBuffer, int numBytes);
     int parseAudioSamples(unsigned char* sourceBuffer, int numBytes);
     
-    int16_t* getNextOutput() const { return _nextOutput; }
-    void setNextOutput(int16_t* nextOutput) { _nextOutput = nextOutput; }
-    
-    int16_t* getEndOfLastWrite() const { return _endOfLastWrite; }
-    void setEndOfLastWrite(int16_t* endOfLastWrite) { _endOfLastWrite = endOfLastWrite; }
+    int16_t& operator[](const int index);
     
-    int16_t* getBuffer() const { return _buffer; }
+    void read(int16_t* destination, unsigned int numSamples);
+    
+    void shiftReadPosition(unsigned int numSamples);
+    
+    unsigned int samplesAvailable() const;
+    
+    bool isNotStarvedOrHasMinimumSamples(unsigned int numRequiredSamples) const;
     
     bool isStarved() const { return _isStarved; }
     void setIsStarved(bool isStarved) { _isStarved = isStarved; }
     
-    bool hasStarted() const { return _hasStarted; }
-    void setHasStarted(bool hasStarted) { _hasStarted = hasStarted; }
-    
-    int diffLastWriteNextOutput() const;
-    
-    bool isStereo() const { return _isStereo; }
-    
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);
     AudioRingBuffer& operator= (const AudioRingBuffer&);
     
+    int16_t* shiftedPositionAccomodatingWrap(int16_t* position, int numSamplesShift) const;
+    
     int16_t* _nextOutput;
     int16_t* _endOfLastWrite;
     int16_t* _buffer;
     bool _isStarved;
-    bool _hasStarted;
-    bool _isStereo;
 };
 
 #endif /* defined(__interface__AudioRingBuffer__) */
@@ -57,17 +57,16 @@ int PositionalAudioRingBuffer::parsePositionalData(unsigned char* sourceBuffer,
 
 bool PositionalAudioRingBuffer::shouldBeAddedToMix(int numJitterBufferSamples) {
     if (_endOfLastWrite) {
-        if (_isStarved && diffLastWriteNextOutput() <= BUFFER_LENGTH_SAMPLES_PER_CHANNEL + numJitterBufferSamples) {
-            printf("Buffer held back\n");
+        if (!isNotStarvedOrHasMinimumSamples(BUFFER_LENGTH_SAMPLES_PER_CHANNEL + numJitterBufferSamples)) {
+            qDebug() << "Starved and do not have minimum samples to start. Buffer held back.\n";
             return false;
-        } else if (diffLastWriteNextOutput() < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
-            printf("Buffer starved.\n");
+        } else if (samplesAvailable() < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
+            qDebug() << "Do not have number of samples needed for interval. Buffer starved.\n";
            _isStarved = true;
             return false;
         } else {
             // good buffer, add this to the mix
             _isStarved = false;
-            _hasStarted = true;
             return true;
         }
     }
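Across the mixer and the interface client, the starvation decision now funnels through isNotStarvedOrHasMinimumSamples(): once a buffer has starved, it must accumulate the required number of samples before playback resumes. A hedged sketch of that gate as a caller might use it (ringBuffer and jitterBufferSamples are illustrative names, not from the diff):

    int requiredSamples = BUFFER_LENGTH_SAMPLES_PER_CHANNEL + jitterBufferSamples;
    if (ringBuffer.isNotStarvedOrHasMinimumSamples(requiredSamples)) {
        // either already playing, or enough has accumulated to (re)start playback
    } else {
        // still starved and below the threshold; hold this buffer back
    }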