Replace inline audio mixing with functions that do gain interpolation

Ken Cooke 2019-04-01 12:24:37 -07:00
parent 181a4e9bdc
commit 85368e6836
2 changed files with 12 additions and 29 deletions
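The mixStereo() and mixMono() helpers that the mixer and client now call live in the AudioHRTF code and are not shown in this diff; judging from the call sites below, they take only the new target gain, so the previous frame's gain is presumably tracked inside the HRTF object. As a rough sketch of the technique named in the commit title (not the actual AudioHRTF implementation), a gain-interpolating stereo mix might look like the following, where the function name and the explicit prevGain/targetGain parameters are assumptions for illustration:

    #include <cstdint>

    // Sketch only -- not the actual AudioHRTF implementation.
    // Accumulates interleaved int16_t stereo input into a float mix buffer,
    // ramping the gain linearly across the frame from its previous value to
    // the new target so that per-frame gain changes do not produce steps
    // ("zipper" artifacts) at frame boundaries.
    static void mixStereoInterpolated(const int16_t* input, float* mixBuffer,
                                      float prevGain, float targetGain, int numFrames) {
        const float scale = 1.0f / 32768.0f;              // int16_t -> float in [-1, 1)
        const float step = (targetGain - prevGain) / numFrames;

        float gain = prevGain;
        for (int i = 0; i < numFrames; i++) {
            gain += step;                                 // per-sample linear ramp
            mixBuffer[2*i+0] += (float)input[2*i+0] * gain * scale;
            mixBuffer[2*i+1] += (float)input[2*i+1] * gain * scale;
        }
    }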


@@ -549,38 +549,28 @@ void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStre
    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();

    // stereo sources are not passed through HRTF
    if (streamToAdd->isStereo()) {
        // apply the avatar gain adjustment
        gain *= mixableStream.hrtf->getGainAdjustment();

        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);

        const float scale = 1 / 32768.0f; // int16_t to float
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            _mixSamples[2*i+0] += (float)streamPopOutput[2*i+0] * gain * scale;
            _mixSamples[2*i+1] += (float)streamPopOutput[2*i+1] * gain * scale;
        }

        // stereo sources are not passed through HRTF
        mixableStream.hrtf->mixStereo(_bufferSamples, _mixSamples, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.manualStereoMixes;
    } else if (isEcho) {
        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        // echo sources are not passed through HRTF
        const float scale = 1/32768.0f; // int16_t to float
        for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
            float sample = (float)streamPopOutput[i] * gain * scale;
            _mixSamples[2*i+0] += sample;
            _mixSamples[2*i+1] += sample;
        }

        mixableStream.hrtf->mixMono(_bufferSamples, _mixSamples, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.manualEchoMixes;
    } else {
        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfRenders;
    }
}
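For comparison, the loops removed above converted int16_t samples to float with a fixed 1/32768 scale and applied a single gain value to the entire frame (NETWORK_FRAME_SAMPLES_PER_CHANNEL samples), so any gain change landed as a step at the frame boundary; the echo path additionally duplicated each mono sample into both output channels. A mono counterpart of the sketch above, again an illustration rather than the real AudioHRTF::mixMono(), might be:

    #include <cstdint>

    // Sketch only: mono int16_t input upmixed into an interleaved stereo
    // float mix buffer, with the same linear gain ramp as the stereo sketch.
    static void mixMonoInterpolated(const int16_t* input, float* mixBuffer,
                                    float prevGain, float targetGain, int numFrames) {
        const float scale = 1.0f / 32768.0f;
        const float step = (targetGain - prevGain) / numFrames;

        float gain = prevGain;
        for (int i = 0; i < numFrames; i++) {
            gain += step;
            float sample = (float)input[i] * gain * scale;
            mixBuffer[2*i+0] += sample;                   // left
            mixBuffer[2*i+1] += sample;                   // right
        }
    }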


@@ -1397,7 +1397,6 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
            // spatialize into mixBuffer
            injector->getLocalFOA().render(_localScratchBuffer, mixBuffer, HRTF_DATASET_INDEX,
                                           qw, qx, qy, qz, gain, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        } else if (options.stereo) {

            if (options.positionSet) {
@@ -1409,11 +1408,8 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
            }

            // direct mix into mixBuffer
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
                mixBuffer[2*i+0] += convertToFloat(_localScratchBuffer[2*i+0]) * gain;
                mixBuffer[2*i+1] += convertToFloat(_localScratchBuffer[2*i+1]) * gain;
            }

            injector->getLocalHRTF().mixStereo(_localScratchBuffer, mixBuffer, gain,
                                               AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        } else { // injector is mono

            if (options.positionSet) {
@@ -1431,11 +1427,8 @@ bool AudioClient::mixLocalAudioInjectors(float* mixBuffer) {
            } else {
                // direct mix into mixBuffer
                for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
                    float sample = convertToFloat(_localScratchBuffer[i]) * gain;
                    mixBuffer[2*i+0] += sample;
                    mixBuffer[2*i+1] += sample;
                }

                injector->getLocalHRTF().mixMono(_localScratchBuffer, mixBuffer, gain,
                                                 AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
            }
        }
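On the client side, the removed loops go through convertToFloat() instead of an explicit scale constant. Assuming that helper does the usual int16_t-to-float PCM normalization (its definition is not part of this diff), it is the same operation as the 1/32768 factor used in the mixer loops:

    #include <cstdint>

    // Assumed shape of the convertToFloat() helper used above (not shown in
    // this diff): normalize int16_t PCM into the float range [-1, 1).
    static inline float convertToFloat(int16_t sample) {
        return (float)sample * (1.0f / 32768.0f);
    }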