Merge pull request #6416 from kencooke/audio-reverb

New reverb
Brad Hefta-Gaub 2015-12-02 11:22:12 -08:00
commit 33b859df33
9 changed files with 2171 additions and 121 deletions

examples/reverbTest.js

@@ -0,0 +1,69 @@
//
// reverbTest.js
// examples
//
// Created by Ken Cooke on 11/23/2015.
// Copyright 2015 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
Script.include("cookies.js");
var audioOptions = new AudioEffectOptions({
    maxRoomSize: 50,
    roomSize: 50,
    reverbTime: 4,
    damping: 0.50,
    inputBandwidth: 0.8,
    earlyLevel: 0,
    tailLevel: 0,
    dryLevel: -6,
    wetLevel: -6
});
AudioDevice.setReverbOptions(audioOptions);
AudioDevice.setReverb(true);
print("Reverb is ON.");
var panel = new Panel(10, 200);
var parameters = [
    { name: "roomSize", min: 0, max: 100, units: " feet" },
    { name: "reverbTime", min: 0, max: 10, units: " sec" },
    { name: "damping", min: 0, max: 1, units: " " },
    { name: "inputBandwidth", min: 0, max: 1, units: " " },
    { name: "earlyLevel", min: -48, max: 0, units: " dB" },
    { name: "tailLevel", min: -48, max: 0, units: " dB" },
    { name: "wetLevel", min: -48, max: 0, units: " dB" },
]
function setter(name) {
    return function(value) { audioOptions[name] = value; AudioDevice.setReverbOptions(audioOptions); }
}

function getter(name) {
    return function() { return audioOptions[name]; }
}

function displayer(units) {
    return function(value) { return (value).toFixed(1) + units; };
}
// create a slider for each parameter
for (var i = 0; i < parameters.length; i++) {
    var p = parameters[i];
    panel.newSlider(p.name, p.min, p.max, setter(p.name), getter(p.name), displayer(p.units));
}
Controller.mouseMoveEvent.connect(function panelMouseMoveEvent(event) { return panel.mouseMoveEvent(event); });
Controller.mousePressEvent.connect( function panelMousePressEvent(event) { return panel.mousePressEvent(event); });
Controller.mouseReleaseEvent.connect(function(event) { return panel.mouseReleaseEvent(event); });
function scriptEnding() {
    panel.destroy();
    AudioDevice.setReverb(false);
    print("Reverb is OFF.");
}
Script.scriptEnding.connect(scriptEnding);

AudioClient.cpp

@@ -33,29 +33,6 @@
#include <QtMultimedia/QAudioInput>
#include <QtMultimedia/QAudioOutput>
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdouble-promotion"
#endif
#ifdef WIN32
#pragma warning (push)
#pragma warning (disable: 4273 4305)
#endif
extern "C" {
#include <gverb/gverb.h>
#include <gverb/gverbdsp.h>
}
#ifdef WIN32
#pragma warning (pop)
#endif
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
#include <NodeList.h>
#include <udt/PacketHeaders.h>
#include <PositionalAudioStream.h>
@@ -120,7 +97,6 @@ AudioClient::AudioClient() :
_audioSourceInjectEnabled(false),
_reverb(false),
_reverbOptions(&_scriptReverbOptions),
_gverb(NULL),
_inputToNetworkResampler(NULL),
_networkToOutputResampler(NULL),
_loopbackResampler(NULL),
@@ -145,9 +121,7 @@ AudioClient::AudioClient() :
connect(updateTimer, &QTimer::timeout, this, &AudioClient::checkDevices);
updateTimer->start(DEVICE_CHECK_INTERVAL_MSECS);
// create GVerb filter
_gverb = createGverbFilter();
configureGverbFilter(_gverb);
configureReverb();
auto& packetReceiver = DependencyManager::get<NodeList>()->getPacketReceiver();
packetReceiver.registerListener(PacketType::AudioStreamStats, &_stats, "processStreamStatsPacket");
@@ -160,10 +134,6 @@ AudioClient::AudioClient() :
AudioClient::~AudioClient() {
stop();
if (_gverb) {
gverb_free(_gverb);
}
}
void AudioClient::reset() {
@@ -173,8 +143,8 @@ void AudioClient::reset() {
_toneSource.reset();
_sourceGain.reset();
_inputGain.reset();
gverb_flush(_gverb);
_sourceReverb.reset();
_listenerReverb.reset();
}
void AudioClient::audioMixerKilled() {
@@ -569,27 +539,32 @@ bool AudioClient::switchOutputToAudioDevice(const QString& outputDeviceName) {
return switchOutputToAudioDevice(getNamedAudioDeviceForMode(QAudio::AudioOutput, outputDeviceName));
}
ty_gverb* AudioClient::createGverbFilter() {
// Initialize a new gverb instance
ty_gverb* filter = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(),
_reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(),
_reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(),
_reverbOptions->getTailLevel());
void AudioClient::configureReverb() {
ReverbParameters p;
_listenerReverb.getParameters(&p);
return filter;
// for now, reuse the gverb parameters
p.sampleRate = _outputFormat.sampleRate();
p.roomSize = _reverbOptions->getRoomSize();
p.reverbTime = _reverbOptions->getReverbTime();
p.highGain = -24.0f * (1.0f - _reverbOptions->getDamping());
p.bandwidth = 10000.0f * _reverbOptions->getInputBandwidth();
p.earlyGain = _reverbOptions->getEarlyLevel();
p.lateGain = _reverbOptions->getTailLevel();
p.wetDryMix = 100.0f * powf(10.0f, _reverbOptions->getWetLevel() * (1/20.0f));
_listenerReverb.setParameters(&p);
// used for adding self-reverb to loopback audio
p.wetDryMix = 100.0f;
p.preDelay = 0.0f;
p.earlyGain = -96.0f; // disable ER
p.lateGain -= 12.0f; // quieter than listener reverb
p.lateMixLeft = 0.0f;
p.lateMixRight = 0.0f;
_sourceReverb.setParameters(&p);
}
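For reference, the wet-level line above maps a level in dB onto the reverb's wet/dry percentage as 100 * 10^(dB/20). A small standalone sketch of that arithmetic, using illustrative input values rather than defaults taken from this change:

#include <math.h>
#include <stdio.h>

int main() {
    // 100 * 10^(dB/20): 0 dB -> 100%, -6 dB -> ~50%, -20 dB -> 10%
    const float wetLevelsDb[] = { 0.0f, -6.0f, -20.0f };
    for (float dB : wetLevelsDb) {
        float wetDryMix = 100.0f * powf(10.0f, dB * (1.0f / 20.0f));
        printf("wetLevel %6.1f dB -> wetDryMix %5.1f%%\n", dB, wetDryMix);
    }
    return 0;
}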
void AudioClient::configureGverbFilter(ty_gverb* filter) {
// Configure the instance (these functions are not super well named - they actually set several internal variables)
gverb_set_roomsize(filter, _reverbOptions->getRoomSize());
gverb_set_revtime(filter, _reverbOptions->getReverbTime());
gverb_set_damping(filter, _reverbOptions->getDamping());
gverb_set_inputbandwidth(filter, _reverbOptions->getInputBandwidth());
gverb_set_earlylevel(filter, DB_CO(_reverbOptions->getEarlyLevel()));
gverb_set_taillevel(filter, DB_CO(_reverbOptions->getTailLevel()));
}
void AudioClient::updateGverbOptions() {
void AudioClient::updateReverbOptions() {
bool reverbChanged = false;
if (_receivedAudioStream.hasReverb()) {
@@ -599,7 +574,7 @@ void AudioClient::updateGverbOptions() {
}
if (_zoneReverbOptions.getWetLevel() != _receivedAudioStream.getWetLevel()) {
_zoneReverbOptions.setWetLevel(_receivedAudioStream.getWetLevel());
// Not part of actual filter config, no need to set reverbChanged to true
reverbChanged = true;
}
if (_reverbOptions != &_zoneReverbOptions) {
@@ -612,9 +587,7 @@ void AudioClient::updateGverbOptions() {
}
if (reverbChanged) {
gverb_free(_gverb);
_gverb = createGverbFilter();
configureGverbFilter(_gverb);
configureReverb();
}
}
@@ -622,7 +595,8 @@ void AudioClient::setReverb(bool reverb) {
_reverb = reverb;
if (!_reverb) {
gverb_flush(_gverb);
_sourceReverb.reset();
_listenerReverb.reset();
}
}
@@ -642,47 +616,7 @@ void AudioClient::setReverbOptions(const AudioEffectOptions* options) {
if (_reverbOptions == &_scriptReverbOptions) {
// Apply them to the reverb instances
gverb_free(_gverb);
_gverb = createGverbFilter();
configureGverbFilter(_gverb);
}
}
void AudioClient::addReverb(ty_gverb* gverb, int16_t* samplesData, int16_t* reverbAlone, int numSamples,
QAudioFormat& audioFormat, bool noEcho) {
float wetFraction = DB_CO(_reverbOptions->getWetLevel());
float dryFraction = 1.0f - wetFraction;
float lValue,rValue;
for (int sample = 0; sample < numSamples; sample += audioFormat.channelCount()) {
// Run GVerb
float value = (float)samplesData[sample];
gverb_do(gverb, value, &lValue, &rValue);
// Mix, accounting for clipping, the left and right channels. Ignore the rest.
for (int j = sample; j < sample + audioFormat.channelCount(); j++) {
if (j == sample) {
// left channel
int lResult = glm::clamp((int)(samplesData[j] * dryFraction + lValue * wetFraction),
AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE);
samplesData[j] = (int16_t)lResult;
if (noEcho) {
reverbAlone[j] = (int16_t)lValue * wetFraction;
}
} else if (j == (sample + 1)) {
// right channel
int rResult = glm::clamp((int)(samplesData[j] * dryFraction + rValue * wetFraction),
AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE);
samplesData[j] = (int16_t)rResult;
if (noEcho) {
reverbAlone[j] = (int16_t)rValue * wetFraction;
}
} else {
// ignore channels above 2
}
}
configureReverb();
}
}
@@ -716,30 +650,28 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) {
_loopbackResampler = new AudioSRC(_inputFormat.sampleRate(), _outputFormat.sampleRate(), channelCount);
}
static QByteArray reverbAlone; // Intermediary for local reverb with no echo
static QByteArray loopBackByteArray;
int numInputSamples = inputByteArray.size() / sizeof(int16_t);
int numLoopbackSamples = numDestinationSamplesRequired(_inputFormat, _outputFormat, numInputSamples);
reverbAlone.resize(numInputSamples * sizeof(int16_t));
loopBackByteArray.resize(numLoopbackSamples * sizeof(int16_t));
int16_t* inputSamples = reinterpret_cast<int16_t*>(inputByteArray.data());
int16_t* reverbAloneSamples = reinterpret_cast<int16_t*>(reverbAlone.data());
int16_t* loopbackSamples = reinterpret_cast<int16_t*>(loopBackByteArray.data());
if (hasReverb) {
updateGverbOptions();
addReverb(_gverb, inputSamples, reverbAloneSamples, numInputSamples,
_inputFormat, !_shouldEchoLocally);
}
possibleResampling(_loopbackResampler,
(_shouldEchoLocally) ? inputSamples : reverbAloneSamples, loopbackSamples,
inputSamples, loopbackSamples,
numInputSamples, numLoopbackSamples,
_inputFormat, _outputFormat);
// apply stereo reverb at the source, to the loopback audio
if (!_shouldEchoLocally && hasReverb) {
assert(_outputFormat.channelCount() == 2);
updateReverbOptions();
_sourceReverb.render(loopbackSamples, loopbackSamples, numLoopbackSamples/2);
}
_loopbackOutputDevice->write(loopBackByteArray);
}
@@ -871,12 +803,20 @@ void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArr
outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
const int16_t* receivedSamples = reinterpret_cast<const int16_t*>(inputBuffer.data());
int16_t* outputSamples = reinterpret_cast<int16_t*>(outputBuffer.data());
// copy the packet from the RB to the output
possibleResampling(_networkToOutputResampler, receivedSamples,
reinterpret_cast<int16_t*>(outputBuffer.data()),
possibleResampling(_networkToOutputResampler, receivedSamples, outputSamples,
numNetworkOutputSamples, numDeviceOutputSamples,
_desiredOutputFormat, _outputFormat);
// apply stereo reverb at the listener, to the received audio
bool hasReverb = _reverb || _receivedAudioStream.hasReverb();
if (hasReverb) {
assert(_outputFormat.channelCount() == 2);
updateReverbOptions();
_listenerReverb.render(outputSamples, outputSamples, numDeviceOutputSamples/2);
}
}
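A note on the two render() calls above: the interleaved int16_t overload of AudioReverb::render() takes a frame count, and an interleaved stereo buffer holds two samples (left and right) per frame, hence the divide-by-two. A hedged sketch of that convention; the helper name and std::vector buffer are illustrative, not part of this change:

#include <stdint.h>
#include <vector>
#include "AudioReverb.h"

// Apply a reverb in place to an interleaved stereo int16_t buffer.
void renderStereoBlock(AudioReverb& reverb, std::vector<int16_t>& interleavedStereo) {
    int numFrames = (int)interleavedStereo.size() / 2;  // 2 samples per frame (L, R)
    reverb.render(interleavedStereo.data(), interleavedStereo.data(), numFrames);
}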
void AudioClient::sendMuteEnvironmentPacket() {

AudioClient.h

@@ -46,6 +46,7 @@
#include "AudioIOStats.h"
#include "AudioNoiseGate.h"
#include "AudioSRC.h"
#include "AudioReverb.h"
#ifdef _WIN32
#pragma warning( push )
@@ -75,8 +76,6 @@ class QAudioOutput;
class QIODevice;
typedef struct ty_gverb ty_gverb;
class Transform;
class NLPacket;
@@ -263,7 +262,8 @@ private:
AudioEffectOptions _scriptReverbOptions;
AudioEffectOptions _zoneReverbOptions;
AudioEffectOptions* _reverbOptions;
ty_gverb* _gverb;
AudioReverb _sourceReverb { AudioConstants::SAMPLE_RATE };
AudioReverb _listenerReverb { AudioConstants::SAMPLE_RATE };
// possible streams needed for resample
AudioSRC* _inputToNetworkResampler;
@@ -271,10 +271,8 @@ private:
AudioSRC* _loopbackResampler;
// Adds Reverb
ty_gverb* createGverbFilter();
void configureGverbFilter(ty_gverb* filter);
void updateGverbOptions();
void addReverb(ty_gverb* gverb, int16_t* samples, int16_t* reverbAlone, int numSamples, QAudioFormat& format, bool noEcho = false);
void configureReverb();
void updateReverbOptions();
void handleLocalEchoAndReverb(QByteArray& inputByteArray);

AudioEffectOptions.cpp

@@ -28,10 +28,10 @@ AudioEffectOptions::AudioEffectOptions(QScriptValue arguments) :
_damping(0.5f),
_spread(15.0f),
_inputBandwidth(0.75f),
_earlyLevel(-22.0f),
_tailLevel(-28.0f),
_earlyLevel(-12.0f),
_tailLevel(-18.0f),
_dryLevel(0.0f),
_wetLevel(6.0f) {
_wetLevel(0.0f) {
if (arguments.property(MAX_ROOM_SIZE_HANDLE).isNumber()) {
_maxRoomSize = arguments.property(MAX_ROOM_SIZE_HANDLE).toNumber();
}

AudioEffectOptions.h

@@ -15,6 +15,8 @@
#include <QtScript/QScriptContext>
#include <QtScript/QScriptEngine>
#include "AudioReverb.h"
class AudioEffectOptions : public QObject {
Q_OBJECT

AudioReverb.cpp: file diff suppressed because it is too large.

libraries/audio/src/AudioReverb.h

@@ -0,0 +1,76 @@
//
// AudioReverb.h
// libraries/audio/src
//
// Created by Ken Cooke on 10/11/15.
// Copyright 2015 High Fidelity, Inc.
//
#ifndef hifi_AudioReverb_h
#define hifi_AudioReverb_h
#include <stdint.h>
typedef struct ReverbParameters {
float sampleRate; // [24000, 48000] Hz
float bandwidth; // [20, 24000] Hz
float preDelay; // [0, 333] ms
float lateDelay; // [0, 166] ms
float reverbTime; // [0.1, 100] seconds
float earlyDiffusion; // [0, 100] percent
float lateDiffusion; // [0, 100] percent
float roomSize; // [0, 100] percent
float density; // [0, 100] percent
float bassMult; // [0.1, 10] ratio
float bassFreq; // [10, 500] Hz
float highGain; // [-24, 0] dB
float highFreq; // [1000, 12000] Hz
float modRate; // [0.1, 10] Hz
float modDepth; // [0, 100] percent
float earlyGain; // [-96, +24] dB
float lateGain; // [-96, +24] dB
float earlyMixLeft; // [0, 100] percent
float earlyMixRight; // [0, 100] percent
float lateMixLeft; // [0, 100] percent
float lateMixRight; // [0, 100] percent
float wetDryMix; // [0, 100] percent
} ReverbParameters;
class ReverbImpl;
class AudioReverb {
public:
AudioReverb(float sampleRate);
~AudioReverb();
void setParameters(ReverbParameters *p);
void getParameters(ReverbParameters *p);
void reset();
// deinterleaved float input/output (native format)
void render(float** inputs, float** outputs, int numFrames);
// interleaved int16_t input/output
void render(const int16_t* input, int16_t* output, int numFrames);
private:
ReverbImpl *_impl;
ReverbParameters _params;
float* _inout[2];
void convertInputFromInt16(const int16_t* input, float** outputs, int numFrames);
void convertOutputToInt16(float** inputs, int16_t* output, int numFrames);
};
#endif // hifi_AudioReverb_h
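A hedged usage sketch assembled only from the declarations above; the sample rate and parameter values are illustrative, not recommendations from this change:

#include <stdint.h>
#include "AudioReverb.h"

void reverbExample(int16_t* interleavedStereo, int numFrames) {
    AudioReverb reverb(48000.0f);   // sample rate in Hz, within the documented [24000, 48000] range

    ReverbParameters p;
    reverb.getParameters(&p);       // start from the reverb's current settings
    p.roomSize = 50.0f;             // percent
    p.reverbTime = 4.0f;            // seconds
    p.wetDryMix = 50.0f;            // percent wet
    reverb.setParameters(&p);

    // Interleaved int16_t overload; in-place rendering, as AudioClient uses it.
    reverb.render(interleavedStereo, interleavedStereo, numFrames);
}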

AudioSRC.cpp

@@ -1218,7 +1218,7 @@ static inline float dither() {
rz = rz * 69069 + 1;
int32_t r0 = rz & 0xffff;
int32_t r1 = rz >> 16;
return (r0 - r1) * (1/65536.0f);
return (int32_t)(r0 - r1) * (1/65536.0f);
}
// convert float to int16_t, interleave stereo

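For context on the dither() change above: one LCG step yields two 16-bit uniform values whose difference gives a triangular-PDF (TPDF) dither sample, and the fixed line makes the int32_t arithmetic on that difference explicit. A hedged standalone sketch of the same idea, with the generator state made explicit here (the real file manages its own state):

#include <stdint.h>

static uint32_t rz = 1;   // LCG state, kept explicit for this sketch

static inline float ditherTPDF() {
    rz = rz * 69069 + 1;                        // 32-bit linear congruential step
    int32_t r0 = rz & 0xffff;                   // low 16 bits,  uniform on [0, 65535]
    int32_t r1 = rz >> 16;                      // high 16 bits, uniform on [0, 65535]
    return (int32_t)(r0 - r1) * (1/65536.0f);   // TPDF sample in (-1, 1)
}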
AudioSRC.h

@@ -12,7 +12,7 @@
#ifndef hifi_AudioSRC_h
#define hifi_AudioSRC_h
#include "stdint.h"
#include <stdint.h>
class AudioSRC {