Mirror of https://thingvellir.net/git/overte
CR feedback
commit 2d595c1f87
parent 60daaba525

2 changed files with 18 additions and 19 deletions
@@ -77,8 +77,6 @@ void AudioNoiseGate::gateSamples(int16_t* samples, int numSamples) {
     // More means better rejection but also can reject continuous things like singing.
     // NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?

-    _closedInLastFrame = false;
-
     float loudness = 0;
     int thisSample = 0;
     int samplesOverNoiseGate = 0;
@@ -142,14 +140,13 @@ void AudioNoiseGate::gateSamples(int16_t* samples, int numSamples) {
             _sampleCounter = 0;

         }

         if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
             _isOpen = true;
             _framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
         } else {
             if (--_framesToClose == 0) {
-                if (_isOpen) {
-                    _closedInLastFrame = true;
-                }
+                _closedInLastFrame = !_isOpen;
                 _isOpen = false;
             }
         }
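For context, here is a minimal, self-contained sketch of the open/close hysteresis the hunks above adjust: the gate opens as soon as enough samples in a block exceed the noise floor, and only closes after a full countdown of quiet frames. The constant and member names mirror the diff; the struct, the threshold values, and the way the just-closed flag is derived here are illustrative simplifications, not the project's actual implementation.

// Illustrative sketch only -- not the actual AudioNoiseGate implementation.
#include <cstdint>
#include <cstdlib>

struct NoiseGateSketch {
    static const int NOISE_GATE_WIDTH = 5;              // samples over the floor needed to open (assumed value)
    static const int NOISE_GATE_CLOSE_FRAME_DELAY = 5;  // quiet frames required before closing (assumed value)

    bool _isOpen = false;
    bool _closedInLastFrame = false;
    int _framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
    float _noiseFloor = 500.0f;                          // fixed here; re-evaluated periodically in the real gate

    void gateFrame(const int16_t* samples, int numSamples) {
        // Count how many samples rise above the current noise floor.
        int samplesOverNoiseGate = 0;
        for (int i = 0; i < numSamples; i++) {
            if (std::abs(static_cast<int>(samples[i])) > _noiseFloor) {
                samplesOverNoiseGate++;
            }
        }

        bool wasOpen = _isOpen;
        if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
            // Loud enough: open immediately and re-arm the close countdown.
            _isOpen = true;
            _framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
        } else if (--_framesToClose == 0) {
            // Quiet for the whole countdown window: close the gate.
            _isOpen = false;
        }
        // One way to track the open-to-closed transition, for callers that
        // want to emit a final fade-out or silent frame.
        _closedInLastFrame = wasOpen && !_isOpen;
    }
};

A caller would run gateFrame() once per audio block and consult _closedInLastFrame to decide whether to send one last silent block downstream.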
@@ -136,9 +136,9 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
             break;
         }
         case SequenceNumberStats::Early: {
-            // Packet is early treat the packets as if all the packets between the last
+            // Packet is early. Treat the packets as if all the packets between the last
             // OnTime packet and this packet was lost. If we're using a codec this will
-            // also result in allowing the codec to flush its internal state. Then
+            // also result in allowing the codec to interpolate lost data. Then
             // fall through to the "on time" logic to actually handle this packet
             int packetsDropped = arrivalInfo._seqDiffFromExpected;
             lostAudioData(packetsDropped);
@@ -150,8 +150,6 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
         if (message.getType() == PacketType::SilentAudioFrame) {
             // If we recieved a SilentAudioFrame from our sender, we might want to drop
             // some of the samples in order to catch up to our desired jitter buffer size.
-            // NOTE: If we're using a codec we will be calling the codec's lostFrame()
-            // method to allow the codec to flush its internal state.
             writeDroppableSilentFrames(networkFrames);
         } else {
             // note: PCM and no codec are identical
@@ -249,17 +247,21 @@ int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packet

 int InboundAudioStream::writeDroppableSilentFrames(int silentFrames) {
-
-    // if we have a decoder, we still want to tell the decoder about our
-    // lost frame. this will flush the internal state of the decoder
-    // we can safely ignore the output of the codec in this case, because
-    // we've enforced that on the sending side, the encoder ran at least
-    // one frame of truly silent audio before we sent the "droppable" silent
-    // frame. Technically we could leave this out, if we know for certain
-    // that the sender has really sent us an encoded packet of zeros, but
-    // since we can trust all encoders to always encode at least one silent
-    // frame (open source, someone code modify it), we will go ahead and
-    // tell our decoder about the lost frame.
+    // We can't guarentee that all clients have faded the stream down
+    // to silence and encode that silence before sending us a
+    // SilentAudioFrame. The encoder may have truncated the stream and
+    // left the decoder holding some loud state. To handle this case
+    // we will call the decoder's lostFrame() method, which indicates
+    // that it should interpolate from it's last known state (which may
+    // be some unknown loud state) down toward silence.
     if (_decoder) {
+        // FIXME - We could potentially use the output from the codec, in which
+        // case we might get a cleaner fade toward silence. NOTE: The below logic
+        // attempts to catch up in the event that the jitter buffers have grown.
+        // The better long term fix is to use the output from the decode, detect
+        // when it actually reaches silence, and then delete the silent portions
+        // of the jitter buffers. Or petentially do a cross fade from the decode
+        // output to silence.
         QByteArray decodedBuffer;
         _decoder->lostFrame(decodedBuffer);
     }
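To make the reasoning in the comment blocks above concrete, here is a hedged sketch of how both InboundAudioStream hunks lean on the decoder's lostFrame() call: an early packet reports every skipped sequence number as a lost frame, and a droppable SilentAudioFrame still tells the decoder about one lost frame so it can interpolate from its last (possibly loud) state down toward silence rather than being flushed abruptly. The Decoder interface and class names below are assumptions for illustration; only _decoder->lostFrame(decodedBuffer) and lostAudioData() appear in the diff itself.

// Illustrative sketch only -- the real codec plugin API is not shown in this diff.
#include <QByteArray>

class Decoder {                                       // assumed interface
public:
    virtual ~Decoder() = default;
    virtual void decode(const QByteArray& encoded, QByteArray& decoded) = 0;
    virtual void lostFrame(QByteArray& decoded) = 0;  // synthesize audio for a missing frame
};

class InboundStreamSketch {
public:
    // Early packet: treat every skipped sequence number as a lost frame.
    void handleEarlyPacket(int seqDiffFromExpected) {
        lostAudioData(seqDiffFromExpected);
    }

    // SilentAudioFrame: the frames themselves may be dropped to shrink the
    // jitter buffer, but the decoder is still told about a lost frame so it
    // can ramp its internal state down toward silence.
    void handleDroppableSilentFrames(int silentFrames) {
        (void)silentFrames;  // the count matters when actually writing frames; omitted in this sketch
        if (_decoder) {
            QByteArray decodedBuffer;
            _decoder->lostFrame(decodedBuffer);
            // The decoded output is ignored here; the FIXME in the diff notes
            // it could be used for a cleaner cross-fade to silence.
        }
        // ... then write only as many silent frames as the jitter buffer needs ...
    }

private:
    void lostAudioData(int packetsDropped) {
        QByteArray decodedBuffer;
        for (int i = 0; i < packetsDropped && _decoder; ++i) {
            _decoder->lostFrame(decodedBuffer);
            // In the real stream, the interpolated audio would be written
            // into the ring buffer here.
        }
    }

    Decoder* _decoder { nullptr };
};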