use proper audio frame naming
parent 972a611d03
commit 00fabb77b4

4 changed files with 29 additions and 31 deletions
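The rename matters because samples and frames are different units in interleaved audio: a frame is one sample per channel, so a sample count scales with channel count while a frame count does not. A minimal sketch of the relationship, assuming stereo and using invented numbers (STEREO_FACTOR itself appears in the diff below; the figures here are hypothetical):

    // Illustrative only: the unit distinction behind this commit's renames.
    // A frame holds one sample per channel; the values are hypothetical.
    constexpr int STEREO_FACTOR = 2;   // channels in a stereo stream

    constexpr int framesToSamples(int frames)  { return frames * STEREO_FACTOR; }
    constexpr int samplesToFrames(int samples) { return samples / STEREO_FACTOR; }

    static_assert(framesToSamples(240) == 480, "240 stereo frames = 480 samples");
    static_assert(samplesToFrames(480) == 240, "480 stereo samples = 240 frames");

Passing a sample count where a frame count is expected silently doubles a stereo figure, which is exactly the class of bug a precise name guards against.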
libraries/audio/src/InboundAudioStream.cpp

@@ -121,11 +121,11 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
     packetReceivedUpdateTimingStats();
 
-    int networkSamples;
+    int networkFrames;
 
     // parse the info after the seq number and before the audio data (the stream properties)
     int prePropertyPosition = message.getPosition();
-    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkSamples);
+    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkFrames);
     message.seek(prePropertyPosition + propertyBytes);
 
     // handle this packet based on its arrival status.
 
@@ -135,7 +135,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
             // NOTE: we assume that each dropped packet contains the same number of samples
             // as the packet we just received.
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
-            writeSamplesForDroppedPackets(packetsDropped * networkSamples);
+            writeFramesForDroppedPackets(packetsDropped * networkFrames);
 
             // fall through to OnTime case
         }
@@ -143,7 +143,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
             // Packet is on time; parse its data to the ringbuffer
             if (message.getType() == PacketType::SilentAudioFrame) {
                 // FIXME - Some codecs need to know about these silent frames... and can produce better output
-                writeDroppableSilentSamples(networkSamples);
+                writeDroppableSilentFrames(networkFrames);
             } else {
                 // note: PCM and no codec are identical
                 bool selectedPCM = _selectedCodecName == "pcm" || _selectedCodecName == "";
@@ -153,7 +153,7 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
                     parseAudioData(message.getType(), afterProperties);
                 } else {
                     qDebug() << "Codec mismatch: expected" << _selectedCodecName << "got" << codecInPacket << "writing silence";
-                    writeDroppableSilentSamples(networkSamples);
+                    writeDroppableSilentFrames(networkFrames);
                     // inform others of the mismatch
                     auto sendingNode = DependencyManager::get<NodeList>()->nodeWithUUID(message.getSourceID());
                     emit mismatchedAudioCodec(sendingNode, _selectedCodecName, codecInPacket);
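For context on the hunks above: `networkFrames` is an out-parameter filled in by `parseStreamProperties`, whose return value is the number of stream-property bytes consumed. The declaration below is paraphrased from what the diff shows and may not match the header verbatim:

    // Paraphrased shape of the call in parseData() above: returns the number
    // of property bytes consumed; the packet's frame count comes back through
    // the int& out-parameter this commit renames from networkSamples.
    virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum,
                                      int& networkFrames);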
@@ -218,9 +218,9 @@ int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packet
     return _ringBuffer.writeData(decodedBuffer.data(), actualSize);
 }
 
-int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
+int InboundAudioStream::writeDroppableSilentFrames(int silentFrames) {
     if (_decoder) {
-        _decoder->trackLostFrames(silentSamples);
+        _decoder->trackLostFrames(silentFrames);
     }
 
     // calculate how many silent frames we should drop.
@@ -228,12 +228,12 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
     int desiredJitterBufferFramesPlusPadding = _desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING;
     int numSilentFramesToDrop = 0;
 
-    if (silentSamples >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
+    if (silentFrames >= samplesPerFrame && _currentJitterBufferFrames > desiredJitterBufferFramesPlusPadding) {
 
         // our avg jitter buffer size exceeds its desired value, so ignore some silent
         // frames to get that size as close to desired as possible
         int numSilentFramesToDropDesired = _currentJitterBufferFrames - desiredJitterBufferFramesPlusPadding;
-        int numSilentFramesReceived = silentSamples / samplesPerFrame;
+        int numSilentFramesReceived = silentFrames / samplesPerFrame;
         numSilentFramesToDrop = std::min(numSilentFramesToDropDesired, numSilentFramesReceived);
 
         // dont reset _currentJitterBufferFrames here; we want to be able to drop further silent frames
@@ -247,7 +247,7 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
         _framesAvailableStat.reset();
     }
 
-    int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
+    int ret = _ringBuffer.addSilentSamples(silentFrames - numSilentFramesToDrop * samplesPerFrame);
 
     return ret;
 }
@@ -414,8 +414,8 @@ void InboundAudioStream::packetReceivedUpdateTimingStats() {
     _lastPacketReceivedTime = now;
 }
 
-int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
-    return writeLastFrameRepeatedWithFade(networkSamples);
+int InboundAudioStream::writeFramesForDroppedPackets(int networkFrames) {
+    return writeLastFrameRepeatedWithFade(networkFrames);
 }
 
 int InboundAudioStream::writeLastFrameRepeatedWithFade(int samples) {
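The drop decision in `writeDroppableSilentFrames` is easiest to follow with numbers plugged in. Below is a standalone restatement of the same calculation with hypothetical values; note that, exactly as the hunks show, the incoming count is still measured against `samplesPerFrame`:

    #include <algorithm>

    // Standalone restatement of the drop decision above; the names mirror the
    // diff, and all values are invented for illustration.
    int silentFramesToDrop(int silentFrames, int samplesPerFrame,
                           int currentJitterBufferFrames, int desiredFramesPlusPadding) {
        int numSilentFramesToDrop = 0;
        if (silentFrames >= samplesPerFrame && currentJitterBufferFrames > desiredFramesPlusPadding) {
            // the jitter buffer is running longer than desired, so swallow
            // some of the incoming silence instead of queueing it
            int numToDropDesired = currentJitterBufferFrames - desiredFramesPlusPadding;
            int numReceived = silentFrames / samplesPerFrame;
            numSilentFramesToDrop = std::min(numToDropDesired, numReceived);
        }
        return numSilentFramesToDrop;
    }

For example, 960 incoming units at 480 samples per frame, with the jitter buffer at 10 frames against a desired ceiling of 7: numReceived = 2, numToDropDesired = 3, so 2 frames are dropped and `addSilentSamples` receives 960 - 2 * 480 = 0.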
libraries/audio/src/InboundAudioStream.h

@@ -115,7 +115,7 @@ public slots:
 private:
     void packetReceivedUpdateTimingStats();
 
-    int writeSamplesForDroppedPackets(int networkSamples);
+    int writeFramesForDroppedPackets(int networkFrames);
 
     void popSamplesNoCheck(int samples);
     void framesAvailableChanged();
@@ -134,12 +134,12 @@ protected:
     /// default implementation assumes packet contains raw audio samples after stream properties
     virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties);
 
-    /// writes silent samples to the buffer that may be dropped to reduce latency caused by the buffer
-    virtual int writeDroppableSilentSamples(int silentSamples);
+    /// writes silent frames to the buffer that may be dropped to reduce latency caused by the buffer
+    virtual int writeDroppableSilentFrames(int silentFrames);
 
     /// writes the last written frame repeatedly, gradually fading to silence.
     /// used for writing samples for dropped packets.
-    virtual int writeLastFrameRepeatedWithFade(int samples);
+    virtual int writeLastFrameRepeatedWithFade(int frames);
 
 protected:
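The base-class virtuals above and the `MixedProcessedAudioStream` overrides below have to be renamed in the same commit. Because the overrides are declared with `override`, a half-done rename fails to compile instead of silently leaving a dead non-virtual behind; a toy illustration:

    struct Base {
        virtual ~Base() = default;
        virtual int writeDroppableSilentFrames(int frames) { return frames; }
    };

    struct Derived : Base {
        int writeDroppableSilentFrames(int frames) override { return frames; }  // matches: OK
        // int writeDroppableSilentSamples(int samples) override;
        // ^ would not compile: 'override' requires a base virtual with this
        //   exact name and signature
    };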
libraries/audio/src/MixedProcessedAudioStream.cpp

@@ -25,20 +25,18 @@ void MixedProcessedAudioStream::outputFormatChanged(int sampleRate, int channelC
     _ringBuffer.resizeForFrameSize(deviceOutputFrameSamples);
 }
 
-int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
-    int deviceSilentFrames = networkToDeviceFrames(silentSamples / STEREO_FACTOR);
-    int deviceSilentSamples = deviceSilentFrames * STEREO_FACTOR;
-    int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(deviceSilentSamples);
-    emit addedSilence(deviceToNetworkFrames(deviceSilentSamplesWritten / STEREO_FACTOR));
-    return deviceSilentSamplesWritten;
+int MixedProcessedAudioStream::writeDroppableSilentFrames(int silentFrames) {
+    int deviceSilentFrames = networkToDeviceFrames(silentFrames);
+    int deviceSilentFramesWritten = InboundAudioStream::writeDroppableSilentFrames(deviceSilentFrames);
+    emit addedSilence(deviceToNetworkFrames(deviceSilentFramesWritten));
+    return deviceSilentFramesWritten;
 }
 
-int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
-    int deviceFrames = networkToDeviceFrames(samples / STEREO_FACTOR);
-    int deviceSamples = deviceFrames * STEREO_FACTOR;
-    int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(deviceSamples);
-    emit addedLastFrameRepeatedWithFade(deviceToNetworkFrames(deviceSamplesWritten / STEREO_FACTOR));
-    return deviceSamplesWritten;
+int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int frames) {
+    int deviceFrames = networkToDeviceFrames(frames);
+    int deviceFramesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(deviceFrames);
+    emit addedLastFrameRepeatedWithFade(deviceToNetworkFrames(deviceFramesWritten));
+    return deviceFramesWritten;
 }
 
 int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) {
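The overrides get shorter because a frame count, unlike an interleaved sample count, is channel-agnostic: the `/ STEREO_FACTOR` and `* STEREO_FACTOR` round trips around the rate conversion simply disappear. The sketch below is a hypothetical stand-in for `networkToDeviceFrames`, assuming the conversion is a plain sample-rate ratio; the repository's real implementation may differ:

    // Hypothetical stand-in for networkToDeviceFrames(), assuming a pure
    // sample-rate ratio between the network and device formats.
    int networkToDeviceFrames(int networkFrames, int deviceSampleRate, int networkSampleRate) {
        // frame counts scale with sample rate; channel count no longer enters,
        // which is why the STEREO_FACTOR arithmetic above could be deleted
        return (networkFrames * deviceSampleRate) / networkSampleRate;
    }

For example, 240 network frames at a 24 kHz network rate map to 480 device frames at 48 kHz (both rates hypothetical here).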
libraries/audio/src/MixedProcessedAudioStream.h

@@ -33,8 +33,8 @@ public:
     void outputFormatChanged(int sampleRate, int channelCount);
 
 protected:
-    int writeDroppableSilentSamples(int silentSamples) override;
-    int writeLastFrameRepeatedWithFade(int samples) override;
+    int writeDroppableSilentFrames(int silentFrames) override;
+    int writeLastFrameRepeatedWithFade(int frames) override;
     int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) override;
 
 private: