Repository: https://github.com/overte-org/overte.git

commit 2d595c1f87
parent 60daaba525

    CR feedback

2 changed files with 18 additions and 19 deletions
@@ -77,8 +77,6 @@ void AudioNoiseGate::gateSamples(int16_t* samples, int numSamples) {
     // More means better rejection but also can reject continuous things like singing.
     // NUMBER_OF_NOISE_SAMPLE_FRAMES: How often should we re-evaluate the noise floor?
 
-    _closedInLastFrame = false;
-
     float loudness = 0;
     int thisSample = 0;
     int samplesOverNoiseGate = 0;
@@ -142,14 +140,13 @@ void AudioNoiseGate::gateSamples(int16_t* samples, int numSamples) {
         _sampleCounter = 0;
     }
 
     if (samplesOverNoiseGate > NOISE_GATE_WIDTH) {
+        _openedInLastFrame = !_isOpen;
         _isOpen = true;
         _framesToClose = NOISE_GATE_CLOSE_FRAME_DELAY;
     } else {
         if (--_framesToClose == 0) {
-            if (_isOpen) {
-                _closedInLastFrame = true;
-            }
+            _closedInLastFrame = _isOpen;
            _isOpen = false;
        }
    }
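Taken together with the first hunk (which stops clearing `_closedInLastFrame` at the top of every frame), this change turns the "last frame" flags into pure edge detectors: each flag is true only when that frame actually flipped the gate's state, and it presumably stays latched until the consuming code resets it. A minimal standalone sketch of that pattern (the `Gate` struct and its method names are illustrative, not the engine's AudioNoiseGate API):

```cpp
#include <iostream>

// Edge-triggered gate bookkeeping, mirroring the semantics of the diff:
// the "last frame" flags record transitions, not the steady state.
struct Gate {
    bool isOpen = false;
    bool openedInLastFrame = false;
    bool closedInLastFrame = false;

    void open() {
        openedInLastFrame = !isOpen;  // true only on a closed -> open edge
        isOpen = true;
    }
    void close() {
        closedInLastFrame = isOpen;   // true only on an open -> closed edge
        isOpen = false;
    }
};

int main() {
    Gate gate;
    gate.open();
    std::cout << "opened edge: " << gate.openedInLastFrame << "\n"; // 1
    gate.open();
    std::cout << "opened edge: " << gate.openedInLastFrame << "\n"; // 0: was already open
    gate.close();
    std::cout << "closed edge: " << gate.closedInLastFrame << "\n"; // 1
    return 0;
}
```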
@@ -136,9 +136,9 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
            break;
        }
        case SequenceNumberStats::Early: {
-            // Packet is early treat the packets as if all the packets between the last
+            // Packet is early. Treat the packets as if all the packets between the last
            // OnTime packet and this packet was lost. If we're using a codec this will
-            // also result in allowing the codec to flush its internal state. Then
+            // also result in allowing the codec to interpolate lost data. Then
            // fall through to the "on time" logic to actually handle this packet
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
            lostAudioData(packetsDropped);
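The Early case converts a sequence-number gap into explicit lost-frame notifications via `lostAudioData(packetsDropped)`, so a codec can conceal the missing audio instead of producing a click. A rough sketch of that per-packet concealment loop (`FakeDecoder` and `handleEarlyPacket` are illustrative stand-ins for the engine's codec plugin and stream class; only the shape of the loop comes from the diff):

```cpp
#include <QByteArray>
#include <vector>

struct FakeDecoder {
    // A real codec would interpolate from its internal state here;
    // this stand-in just produces one frame of silence.
    void lostFrame(QByteArray& out) {
        out = QByteArray(480 * 2, '\0');  // e.g. 10 ms of 16-bit mono at 48 kHz
    }
};

std::vector<QByteArray> handleEarlyPacket(FakeDecoder* decoder, int seqDiffFromExpected) {
    // Treat every packet between the last OnTime packet and this early one as lost.
    std::vector<QByteArray> concealed;
    for (int i = 0; i < seqDiffFromExpected; ++i) {
        QByteArray decodedBuffer;
        if (decoder) {
            decoder->lostFrame(decodedBuffer);  // let the codec conceal the gap
        }
        concealed.push_back(decodedBuffer);
    }
    return concealed;  // caller then falls through to the normal "on time" path
}
```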
@@ -150,8 +150,6 @@ int InboundAudioStream::parseData(ReceivedMessage& message) {
    if (message.getType() == PacketType::SilentAudioFrame) {
        // If we recieved a SilentAudioFrame from our sender, we might want to drop
        // some of the samples in order to catch up to our desired jitter buffer size.
-        // NOTE: If we're using a codec we will be calling the codec's lostFrame()
-        // method to allow the codec to flush its internal state.
        writeDroppableSilentFrames(networkFrames);
    } else {
        // note: PCM and no codec are identical
@@ -249,17 +247,21 @@ int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packet
 
 int InboundAudioStream::writeDroppableSilentFrames(int silentFrames) {
 
-    // if we have a decoder, we still want to tell the decoder about our
-    // lost frame. this will flush the internal state of the decoder
-    // we can safely ignore the output of the codec in this case, because
-    // we've enforced that on the sending side, the encoder ran at least
-    // one frame of truly silent audio before we sent the "droppable" silent
-    // frame. Technically we could leave this out, if we know for certain
-    // that the sender has really sent us an encoded packet of zeros, but
-    // since we can trust all encoders to always encode at least one silent
-    // frame (open source, someone code modify it), we will go ahead and
-    // tell our decoder about the lost frame.
+    // We can't guarentee that all clients have faded the stream down
+    // to silence and encode that silence before sending us a
+    // SilentAudioFrame. The encoder may have truncated the stream and
+    // left the decoder holding some loud state. To handle this case
+    // we will call the decoder's lostFrame() method, which indicates
+    // that it should interpolate from it's last known state (which may
+    // be some unknown loud state) down toward silence.
    if (_decoder) {
+        // FIXME - We could potentially use the output from the codec, in which
+        // case we might get a cleaner fade toward silence. NOTE: The below logic
+        // attempts to catch up in the event that the jitter buffers have grown.
+        // The better long term fix is to use the output from the decode, detect
+        // when it actually reaches silence, and then delete the silent portions
+        // of the jitter buffers. Or petentially do a cross fade from the decode
+        // output to silence.
        QByteArray decodedBuffer;
        _decoder->lostFrame(decodedBuffer);
    }
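The rewritten comment block changes the rationale: `lostFrame()` is no longer described as flushing the decoder, but as asking it to interpolate from its last (possibly loud) internal state down toward silence, while the stream itself still writes droppable silence. A toy model of that decay behavior (`FadeDecoder` is hypothetical; real codecs implement their own concealment):

```cpp
#include <cstdio>
#include <vector>

struct FadeDecoder {
    float lastSample = 0.8f;                    // possibly "loud" internal state
    void lostFrame(std::vector<float>& out) {
        out.assign(4, 0.0f);
        for (float& s : out) {
            lastSample *= 0.5f;                 // interpolate down toward silence
            s = lastSample;
        }
    }
};

int main() {
    FadeDecoder decoder;
    std::vector<float> decodedBuffer;
    // On a droppable silent frame the stream still notifies the decoder,
    // but it can ignore the output and write true silence instead.
    decoder.lostFrame(decodedBuffer);
    for (float s : decodedBuffer) {
        std::printf("%.3f ", s);                // 0.400 0.200 0.100 0.050
    }
    std::printf("\n");
    return 0;
}
```

As the FIXME in the hunk notes, using the decoder's concealment output directly (rather than discarding it) could give a cleaner fade, at the cost of complicating the jitter-buffer catch-up logic.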