use proper audio frame naming

Zach Pomerantz 2016-10-04 18:20:01 -07:00
parent 972a611d03
commit 00fabb77b4
4 changed files with 29 additions and 31 deletions
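The rename matters because these streams count audio in two units: a sample is a single PCM value, while a frame is one sample per channel, so for the stereo network streams samples = frames * 2. That relationship is what the STEREO_FACTOR conversions in the old MixedProcessedAudioStream code below express. A minimal sketch of the assumed convention (the helper names are illustrative, not part of the codebase):

// Illustrative only: the frame/sample relationship the rename relies on.
// STEREO_FACTOR appears in the old MixedProcessedAudioStream code; these helpers are hypothetical.
static const int STEREO_FACTOR = 2;
static inline int framesToSamples(int frames)  { return frames * STEREO_FACTOR; }
static inline int samplesToFrames(int samples) { return samples / STEREO_FACTOR; }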

InboundAudioStream.cpp

@@ -121,11 +121,11 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
     packetReceivedUpdateTimingStats();
 
-    int networkSamples;
+    int networkFrames;
 
     // parse the info after the seq number and before the audio data (the stream properties)
     int prePropertyPosition = message.getPosition();
-    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkSamples);
+    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkFrames);
     message.seek(prePropertyPosition + propertyBytes);
 
     // handle this packet based on its arrival status.
@@ -135,7 +135,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
             // NOTE: we assume that each dropped packet contains the same number of samples
            // as the packet we just received.
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
-           writeSamplesForDroppedPackets(packetsDropped * networkSamples);
+           writeFramesForDroppedPackets(packetsDropped * networkFrames);
 
            // fall through to OnTime case
        }
@@ -143,7 +143,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
            // Packet is on time; parse its data to the ringbuffer
            if (message.getType() == PacketType::SilentAudioFrame) {
                // FIXME - Some codecs need to know about these silent frames... and can produce better output
-               writeDroppableSilentSamples(networkSamples);
+               writeDroppableSilentFrames(networkFrames);
            } else {
                // note: PCM and no codec are identical
                bool selectedPCM = _selectedCodecName == "pcm" || _selectedCodecName == "";
@@ -153,7 +153,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
                    parseAudioData(message.getType(), afterProperties);
                } else {
                    qDebug() << "Codec mismatch: expected" << _selectedCodecName << "got" << codecInPacket << "writing silence";
-                   writeDroppableSilentSamples(networkSamples);
+                   writeDroppableSilentFrames(networkFrames);
 
                    // inform others of the mismatch
                    auto sendingNode = DependencyManager::get<NodeList>()->nodeWithUUID(message.getSourceID());
                    emit mismatchedAudioCodec(sendingNode, _selectedCodecName, codecInPacket);
@@ -218,9 +218,9 @@ int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) {
     return _ringBuffer.writeData(decodedBuffer.data(), actualSize);
 }
 
-int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
+int InboundAudioStream::writeDroppableSilentFrames(int silentFrames) {
     if (_decoder) {
-        _decoder->trackLostFrames(silentSamples);
+        _decoder->trackLostFrames(silentFrames);
     }
 
     // calculate how many silent frames we should drop.
@@ -228,12 +228,12 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
     int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
     int numSilentFramesToDrop = 0;
 
-    if (silentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
+    if (silentFrames >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
 
         // our avg jitter buffer size exceeds its desired value, so ignore some silent
         // frames to get that size as close to desired as possible
         int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
-        int numSilentFramesReceived = silentSamples / samplesPerFrame;
+        int numSilentFramesReceived = silentFrames / samplesPerFrame;
         numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
 
         // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
@@ -247,7 +247,7 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
         _framesAvailableStat.reset();
     }
 
-    int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
+    int ret = _ringBuffer.addSilentSamples(silentFrames - numSilentFramesToDrop * samplesPerFrame);
 
     return ret;
 }
@@ -414,8 +414,8 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
     _lastPacketReceivedTime = now;
 }
 
-int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
-    return writeLastFrameRepeatedWithFade(networkSamples);
+int InboundAudioStream::writeFramesForDroppedPackets(int networkFrames) {
+    return writeLastFrameRepeatedWithFade(networkFrames);
 }
 
 int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
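For context on the writeDroppableSilentFrames hunk above: when the jitter buffer has grown past its target depth, some of the incoming silence is discarded rather than written, pulling the buffer back toward its desired size. A small worked sketch of that arithmetic with assumed values (the real numbers come from the ring buffer and the desired-frames setting):

// Worked example (assumed values) mirroring the arithmetic in writeDroppableSilentFrames().
#include <algorithm>
#include <cstdio>

int main() {
    int samplesPerFrame = 240;                      // assumed ring-buffer frame size
    int silentFrames = 960;                         // assumed value passed to writeDroppableSilentFrames()
    int currentJitterBufferFrames = 10;             // assumed current buffer depth
    int desiredJitterBufferFramesPlusPadding = 6;   // assumed target depth plus padding

    int numSilentFramesToDrop = 0;
    if (silentFrames >= samplesPerFrame && currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
        int toDropDesired = currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;  // 10 - 6 = 4
        int received = silentFrames / samplesPerFrame;                                         // 960 / 240 = 4
        numSilentFramesToDrop = std::min(toDropDesired, received);                             // drop 4 frames
    }
    // Silence actually written to the ring buffer: 960 - 4 * 240 = 0 samples.
    printf("silent samples written: %d\n", silentFrames - numSilentFramesToDrop * samplesPerFrame);
    return 0;
}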

InboundAudioStream.h

@@ -115,7 +115,7 @@ public slots:
 private:
     void packetReceivedUpdateTimingStats();
-    int writeSamplesForDroppedPackets(int networkSamples);
+    int writeFramesForDroppedPackets(int networkFrames);
     void popSamplesNoCheck(int samples);
     void framesAvailableChanged();
@@ -134,12 +134,12 @@ protected:
     /// default implementation assumes packet contains raw audio samples after stream properties
     virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties);
 
-    /// writes silent samples to the buffer that may be dropped to reduce latency caused by the buffer
-    virtual int writeDroppableSilentSamples(int silentSamples);
+    /// writes silent frames to the buffer that may be dropped to reduce latency caused by the buffer
+    virtual int writeDroppableSilentFrames(int silentFrames);
 
     /// writes the last written frame repeatedly, gradually fading to silence.
     /// used for writing samples for dropped packets.
-    virtual int writeLastFrameRepeatedWithFade(int samples);
+    virtual int writeLastFrameRepeatedWithFade(int frames);
 
 protected:

MixedProcessedAudioStream.cpp

@@ -25,20 +25,18 @@ void MixedProcessedAudioStream::outputFormatChanged(int sampleRate, int channelCount) {
     _ringBuffer.resizeForFrameSize(deviceOutputFrameSamples);
 }
 
-int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
-    int deviceSilentFrames = networkToDeviceFrames(silentSamples / STEREO_FACTOR);
-    int deviceSilentSamples = deviceSilentFrames * STEREO_FACTOR;
-    int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(deviceSilentSamples);
-    emit addedSilence(deviceToNetworkFrames(deviceSilentSamplesWritten / STEREO_FACTOR));
-    return deviceSilentSamplesWritten;
+int MixedProcessedAudioStream::writeDroppableSilentFrames(int silentFrames) {
+    int deviceSilentFrames = networkToDeviceFrames(silentFrames);
+    int deviceSilentFramesWritten = InboundAudioStream::writeDroppableSilentFrames(deviceSilentFrames);
+    emit addedSilence(deviceToNetworkFrames(deviceSilentFramesWritten));
+    return deviceSilentFramesWritten;
 }
 
-int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
-    int deviceFrames = networkToDeviceFrames(samples / STEREO_FACTOR);
-    int deviceSamples = deviceFrames * STEREO_FACTOR;
-    int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(deviceSamples);
-    emit addedLastFrameRepeatedWithFade(deviceToNetworkFrames(deviceSamplesWritten / STEREO_FACTOR));
-    return deviceSamplesWritten;
+int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int frames) {
+    int deviceFrames = networkToDeviceFrames(frames);
+    int deviceFramesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(deviceFrames);
+    emit addedLastFrameRepeatedWithFade(deviceToNetworkFrames(deviceFramesWritten));
+    return deviceFramesWritten;
 }
 
 int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) {
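The MixedProcessedAudioStream overrides simplify because the argument is now already a per-channel frame count, so the STEREO_FACTOR multiply/divide pairs drop out and only the network-to-device conversion remains. A sketch of what that conversion might look like (the real networkToDeviceFrames()/deviceToNetworkFrames() live elsewhere in the class and depend on the negotiated output format; the rates below are assumptions for illustration):

// Sketch only: assumed rate-ratio conversion, not the actual implementation.
static const int NETWORK_SAMPLE_RATE = 24000;   // assumed network stream rate
static const int DEVICE_SAMPLE_RATE = 48000;    // assumed device output rate

static int networkToDeviceFrames(int networkFrames) {
    // frame counts are per channel, so no STEREO_FACTOR division is needed here
    return networkFrames * DEVICE_SAMPLE_RATE / NETWORK_SAMPLE_RATE;
}

static int deviceToNetworkFrames(int deviceFrames) {
    return deviceFrames * NETWORK_SAMPLE_RATE / DEVICE_SAMPLE_RATE;
}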

MixedProcessedAudioStream.h

@@ -33,8 +33,8 @@ public:
     void outputFormatChanged(int sampleRate, int channelCount);
 
 protected:
-    int writeDroppableSilentSamples(int silentSamples) override;
-    int writeLastFrameRepeatedWithFade(int samples) override;
+    int writeDroppableSilentFrames(int silentFrames) override;
+    int writeLastFrameRepeatedWithFade(int frames) override;
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) override;
 
 private: