Merge pull request #2350 from birarda/audio-scaling

suppress silent streams from interface and agent
AndrewMeadows 2014-03-18 11:48:27 -07:00
commit e7f113e616
8 changed files with 101 additions and 21 deletions
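
The shape of the change, end to end: when a sender's frame is entirely silent it emits a PacketTypeSilentAudioFrame whose audio payload is a single int16_t sample count, and the mixer writes the equivalent run of zeros into that source's ring buffer on arrival. A rough sketch of the two payload shapes (the frame size and names here are illustrative assumptions, not the project's constants):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Simplified: real packets also carry a header, position, and orientation.
const int SAMPLES_PER_FRAME = 256;   // assumption, for illustration only

int main() {
    // normal frame: the full block of 16-bit samples goes on the wire
    size_t audioPayloadBytes = SAMPLES_PER_FRAME * sizeof(int16_t);

    // silent frame: only a sample count goes out; the mixer reconstructs
    // that many zero samples on its side
    char payload[sizeof(int16_t)];
    int16_t numSilentSamples = SAMPLES_PER_FRAME;
    memcpy(payload, &numSilentSamples, sizeof(numSilentSamples));
    size_t silentPayloadBytes = sizeof(numSilentSamples);

    printf("%zu bytes talking, %zu bytes silent\n", audioPayloadBytes, silentPayloadBytes);
    return 0;
}
```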

View file

@@ -286,7 +286,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
     AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();

     // zero out the client mix for this node
-    memset(_clientSamples, 0, sizeof(_clientSamples));
+    memset(_clientSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_STEREO);

     // loop through all other nodes that have sufficient audio to mix
     foreach (const SharedNodePointer& otherNode, NodeList::getInstance()->getNodeHash()) {
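
A side note on this hunk: `sizeof(_clientSamples)` measures the array only while the compiler can see the declaration as an array; the explicit `NETWORK_BUFFER_LENGTH_BYTES_STEREO` keeps the zeroed extent tied to the network frame size and avoids the classic array-to-pointer decay trap. A minimal sketch of that pitfall:

```cpp
#include <cstdint>
#include <cstdio>

int16_t samples[1024];

void takesPointer(int16_t* buffer) {
    // the array has decayed to a pointer here: sizeof gives 8 (or 4), not 2048
    printf("%zu\n", sizeof(buffer));
}

int main() {
    printf("%zu\n", sizeof(samples));  // 2048: the whole array is visible here
    takesPointer(samples);
    return 0;
}
```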
@@ -321,7 +321,8 @@ void AudioMixer::readPendingDatagrams() {
     PacketType mixerPacketType = packetTypeForPacket(receivedPacket);
     if (mixerPacketType == PacketTypeMicrophoneAudioNoEcho
         || mixerPacketType == PacketTypeMicrophoneAudioWithEcho
-        || mixerPacketType == PacketTypeInjectAudio) {
+        || mixerPacketType == PacketTypeInjectAudio
+        || mixerPacketType == PacketTypeSilentAudioFrame) {
         nodeList->findNodeAndUpdateWithDataFromPacket(receivedPacket);
     } else {

View file

@@ -41,7 +41,8 @@ AvatarAudioRingBuffer* AudioMixerClientData::getAvatarAudioRingBuffer() const {
 int AudioMixerClientData::parseData(const QByteArray& packet) {
     PacketType packetType = packetTypeForPacket(packet);
     if (packetType == PacketTypeMicrophoneAudioWithEcho
-        || packetType == PacketTypeMicrophoneAudioNoEcho) {
+        || packetType == PacketTypeMicrophoneAudioNoEcho
+        || packetType == PacketTypeSilentAudioFrame) {
         // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
         AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();

View file

@@ -436,9 +436,8 @@ void Audio::handleAudioInput() {
             }
         }
         if (!_noiseGateOpen) {
-            for (int i = 0; i < NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL; i++) {
-                monoAudioSamples[i] = 0;
-            }
+            memset(monoAudioSamples, 0, NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL);
            _lastInputLoudness = 0;
        }
    }
@@ -483,8 +482,25 @@ void Audio::handleAudioInput() {
     // we need the amount of bytes in the buffer + 1 for type
     // + 12 for 3 floats for position + float for bearing + 1 attenuation byte
-    PacketType packetType = Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)
-        ? PacketTypeMicrophoneAudioWithEcho : PacketTypeMicrophoneAudioNoEcho;
+    int numAudioBytes = 0;
+    PacketType packetType;
+
+    if (_lastInputLoudness == 0) {
+        packetType = PacketTypeSilentAudioFrame;
+
+        // we need to indicate how many silent samples this is to the audio mixer
+        monoAudioSamples[0] = NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+        numAudioBytes = sizeof(int16_t);
+    } else {
+        numAudioBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL;
+
+        if (Menu::getInstance()->isOptionChecked(MenuOption::EchoServerAudio)) {
+            packetType = PacketTypeMicrophoneAudioWithEcho;
+        } else {
+            packetType = PacketTypeMicrophoneAudioNoEcho;
+        }
+    }

     char* currentPacketPtr = monoAudioDataPacket + populatePacketHeader(monoAudioDataPacket, packetType);
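
Worth noting in this hunk: the silent path stashes the sample count in monoAudioSamples[0], so the existing copy-and-send machinery carries it with no new branch, and the audio payload shrinks from a full frame to one int16_t. Back-of-the-envelope on the saving (the 512-byte frame constant is an assumption for illustration, not read from the headers):

```cpp
#include <cstdint>
#include <cstdio>

// assumed value; the real constant lives in the shared audio headers
const int NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL = 512;

int main() {
    int talkingBytes = NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL; // full mono frame
    int silentBytes = (int) sizeof(int16_t);                    // just the count
    printf("%d audio bytes per frame talking, %d silent\n", talkingBytes, silentBytes);
    return 0;
}
```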
@@ -496,12 +512,10 @@ void Audio::handleAudioInput() {
     memcpy(currentPacketPtr, &headOrientation, sizeof(headOrientation));
     currentPacketPtr += sizeof(headOrientation);

-    nodeList->writeDatagram(monoAudioDataPacket,
-                            NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes,
-                            audioMixer);
+    nodeList->writeDatagram(monoAudioDataPacket, numAudioBytes + leadingBytes, audioMixer);

     Application::getInstance()->getBandwidthMeter()->outputStream(BandwidthMeter::AUDIO)
-        .updateValue(NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL + leadingBytes);
+        .updateValue(numAudioBytes + leadingBytes);
 }

 delete[] inputAudioSamples;
@@ -638,7 +652,14 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
             int16_t collisionSample = (int16_t) sample;

+            _lastInputLoudness = 0;
             monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+            _lastInputLoudness += fabsf(monoInput[i]);
+            _lastInputLoudness /= numSamples;
+            _lastInputLoudness /= MAX_SAMPLE_VALUE;

             _localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
                                                     MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
@@ -662,7 +683,14 @@ void Audio::addProceduralSounds(int16_t* monoInput, int numSamples) {
             int16_t collisionSample = (int16_t) sample;

+            _lastInputLoudness = 0;
             monoInput[i] = glm::clamp(monoInput[i] + collisionSample, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+            _lastInputLoudness += fabsf(monoInput[i]);
+            _lastInputLoudness /= numSamples;
+            _lastInputLoudness /= MAX_SAMPLE_VALUE;

             _localProceduralSamples[i] = glm::clamp(_localProceduralSamples[i] + collisionSample,
                                                     MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
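
These two procedural-sound hunks keep _lastInputLoudness nonzero while collision and similar sounds play, so locally generated audio is not mistaken for silence and suppressed. The quantity the arithmetic aims at is mean absolute amplitude normalized to 0..1, sketched below as a per-frame function; as committed, the reset and renormalization inside the loop mean the final value reflects only the last sample, which still serves the "nonzero while audible" purpose even if it is not a true mean.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

const float MAX_SAMPLE_VALUE = 32767.0f;   // assumption: int16 full scale

// the intended quantity: mean absolute amplitude, normalized to 0..1,
// so a frame of exact zeros yields loudness 0 and therefore a
// PacketTypeSilentAudioFrame on the next send
float frameLoudness(const int16_t* samples, int numSamples) {
    float loudness = 0.0f;
    for (int i = 0; i < numSamples; i++) {
        loudness += fabsf(samples[i]);
    }
    return loudness / numSamples / MAX_SAMPLE_VALUE;
}

int main() {
    int16_t quiet[4] = { 0, 0, 0, 0 };
    int16_t loud[4] = { 32767, -32767, 32767, -32767 };
    printf("%f %f\n", frameLoudness(quiet, 4), frameLoudness(loud, 4));
    return 0;
}
```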

View file

@@ -156,6 +156,21 @@ unsigned int AudioRingBuffer::samplesAvailable() const {
     }
 }

+void AudioRingBuffer::addSilentFrame(int numSilentSamples) {
+    // memset zeros into the buffer, accommodating a wrap around the end,
+    // and push _endOfLastWrite to the correct spot
+    if (_endOfLastWrite + numSilentSamples <= _buffer + _sampleCapacity) {
+        memset(_endOfLastWrite, 0, numSilentSamples * sizeof(int16_t));
+        _endOfLastWrite += numSilentSamples;
+    } else {
+        int numSamplesToEnd = (_buffer + _sampleCapacity) - _endOfLastWrite;
+        memset(_endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));
+        memset(_buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));
+
+        _endOfLastWrite = _buffer + (numSilentSamples - numSamplesToEnd);
+    }
+}
+
 bool AudioRingBuffer::isNotStarvedOrHasMinimumSamples(unsigned int numRequiredSamples) const {
     if (!_isStarved) {
         return true;
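
addSilentFrame zeroes at most two contiguous regions, splitting at the physical end of the storage. A standalone walk-through of the wrap case with toy numbers (the real capacity comes from the ring buffer's constructor):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// capacity 8, write head at index 6, 5 silent samples to add
int main() {
    int16_t buffer[8];
    const int sampleCapacity = 8;
    int16_t* endOfLastWrite = buffer + 6;
    int numSilentSamples = 5;

    // 2 samples fit before the physical end of the buffer...
    int numSamplesToEnd = (int)((buffer + sampleCapacity) - endOfLastWrite);
    memset(endOfLastWrite, 0, numSamplesToEnd * sizeof(int16_t));               // slots 6..7

    // ...and the remaining 3 wrap around to the front
    memset(buffer, 0, (numSilentSamples - numSamplesToEnd) * sizeof(int16_t));  // slots 0..2
    endOfLastWrite = buffer + (numSilentSamples - numSamplesToEnd);

    printf("write head now at index %td\n", endOfLastWrite - buffer);           // 3
    return 0;
}
```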

View file

@@ -70,6 +70,8 @@ public:
     void setIsStarved(bool isStarved) { _isStarved = isStarved; }
     bool hasStarted() const { return _hasStarted; }

+    void addSilentFrame(int numSilentSamples);
+
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);

View file

@@ -44,8 +44,20 @@ int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
     packetStream.skipRawData(numBytesForPacketHeader(packet));
     packetStream.skipRawData(parsePositionalData(packet.mid(packetStream.device()->pos())));

-    packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(),
-                                       packet.size() - packetStream.device()->pos()));
+    if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
+        // this source had no audio to send us, but this counts as a packet -
+        // write silence equivalent to the number of silent samples they just sent us
+        int16_t numSilentSamples;
+        packetStream.readRawData(reinterpret_cast<char*>(&numSilentSamples), sizeof(int16_t));
+
+        addSilentFrame(numSilentSamples);
+    } else {
+        // there is audio data to read
+        packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(),
+                                           packet.size() - packetStream.device()->pos()));
+    }

     return packetStream.device()->pos();
 }
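
One thing to keep in mind with this framing: the count is written with writeRawData on the sender and read back with readRawData here, so it crosses the wire in host byte order and relies on both ends agreeing on endianness. If that assumption ever needed tightening, QDataStream's typed operators can pin the wire order explicitly; a sketch of that alternative (not what this PR does):

```cpp
#include <QByteArray>
#include <QDataStream>
#include <QIODevice>

// typed serialization with an explicit byte order, so heterogeneous
// endpoints would still agree on the value
QByteArray encodeSilentCount(qint16 numSilentSamples) {
    QByteArray payload;
    QDataStream out(&payload, QIODevice::WriteOnly);
    out.setByteOrder(QDataStream::LittleEndian);  // explicit wire order
    out << numSilentSamples;
    return payload;
}

qint16 decodeSilentCount(const QByteArray& payload) {
    QDataStream in(payload);
    in.setByteOrder(QDataStream::LittleEndian);
    qint16 numSilentSamples;
    in >> numSilentSamples;
    return numSilentSamples;
}
```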

View file

@@ -267,15 +267,36 @@ void ScriptEngine::run() {
         if (_avatarAudioBuffer && _numAvatarAudioBufferSamples > 0) {
             // if we have an avatar audio stream then send it out to our audio-mixer
-            QByteArray audioPacket = byteArrayWithPopulatedHeader(PacketTypeMicrophoneAudioNoEcho);
+            bool silentFrame = true;
+
+            // check whether all of the _numAvatarAudioBufferSamples to be sent are silence
+            for (int i = 0; i < _numAvatarAudioBufferSamples; ++i) {
+                if (_avatarAudioBuffer[i] != 0) {
+                    silentFrame = false;
+                    break;
+                }
+            }
+
+            QByteArray audioPacket = byteArrayWithPopulatedHeader(silentFrame
+                                                                  ? PacketTypeSilentAudioFrame
+                                                                  : PacketTypeMicrophoneAudioNoEcho);
             QDataStream packetStream(&audioPacket, QIODevice::Append);

             // use the orientation and position of this avatar for the source of this audio
             packetStream.writeRawData(reinterpret_cast<const char*>(&_avatarData->getPosition()), sizeof(glm::vec3));
             glm::quat headOrientation = _avatarData->getHeadOrientation();
             packetStream.writeRawData(reinterpret_cast<const char*>(&headOrientation), sizeof(glm::quat));

-            packetStream.writeRawData(reinterpret_cast<const char*>(_avatarAudioBuffer),
-                                      _numAvatarAudioBufferSamples * sizeof(int16_t));
+            if (silentFrame) {
+                // write the number of silent samples so the audio-mixer can uphold timing
+                int16_t numSilentSamples = _numAvatarAudioBufferSamples;
+                packetStream.writeRawData(reinterpret_cast<const char*>(&numSilentSamples), sizeof(int16_t));
+            } else {
+                // write the raw audio data
+                packetStream.writeRawData(reinterpret_cast<const char*>(_avatarAudioBuffer),
+                                          _numAvatarAudioBufferSamples * sizeof(int16_t));
+            }

             nodeList->broadcastToNodes(audioPacket, NodeSet() << NodeType::AudioMixer);
         }
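
The silence scan here is a linear pass with early exit. For what it's worth, the same predicate reads compactly with the standard library (an equivalent formulation, not the code the PR adds):

```cpp
#include <algorithm>
#include <cstdint>

// true only when every sample in the frame is exactly zero
bool isSilentFrame(const int16_t* samples, int numSamples) {
    return std::all_of(samples, samples + numSamples,
                       [](int16_t sample) { return sample == 0; });
}
```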

View file

@@ -32,7 +32,7 @@ enum PacketType {
     PacketTypeMicrophoneAudioNoEcho,
     PacketTypeMicrophoneAudioWithEcho,
     PacketTypeBulkAvatarData,
-    PacketTypeTransmitterData, // usable
+    PacketTypeSilentAudioFrame,
     PacketTypeEnvironmentData,
     PacketTypeDomainListRequest,
     PacketTypeRequestAssignment,
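
The new type takes over the retired PacketTypeTransmitterData slot rather than being appended, which leaves the numeric value of every later packet type untouched. A reduced illustration (the numeric values below are assumptions for demonstration, not the real ones):

```cpp
// enumerators take consecutive values, so an in-place swap preserves the
// wire value of everything declared after the swapped slot
enum PacketType {
    PacketTypeBulkAvatarData = 10,
    PacketTypeSilentAudioFrame,   // takes the old TransmitterData value, 11
    PacketTypeEnvironmentData,    // still 12, exactly as before the change
};

static_assert(PacketTypeEnvironmentData == 12,
              "packet types after the swapped slot keep their old values");
```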