diff --git a/assignment-client/src/Agent.cpp b/assignment-client/src/Agent.cpp index 4755d9137a..b66226e1a5 100644 --- a/assignment-client/src/Agent.cpp +++ b/assignment-client/src/Agent.cpp @@ -47,6 +47,7 @@ Agent::Agent(const QByteArray& packet) : _scriptEngine.getEntityScriptingInterface()->setPacketSender(&_entityEditSender); DependencyManager::set(); + DependencyManager::set(); } void Agent::readPendingDatagrams() { diff --git a/assignment-client/src/AssignmentClient.cpp b/assignment-client/src/AssignmentClient.cpp index 80f3cbab5e..bf67d4d597 100644 --- a/assignment-client/src/AssignmentClient.cpp +++ b/assignment-client/src/AssignmentClient.cpp @@ -136,7 +136,6 @@ AssignmentClient::AssignmentClient(int &argc, char **argv) : // Create Singleton objects on main thread NetworkAccessManager::getInstance(); - auto soundCache = DependencyManager::get(); } void AssignmentClient::sendAssignmentRequest() { diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp index b862cd0c78..e865ab0035 100644 --- a/assignment-client/src/audio/AudioMixer.cpp +++ b/assignment-client/src/audio/AudioMixer.cpp @@ -479,10 +479,20 @@ void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) { for (int i = 0; i < _zoneReverbSettings.size(); ++i) { AudioMixerClientData* data = static_cast(node->getLinkedData()); glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition(); - if (_audioZones[_zoneReverbSettings[i].zone].contains(streamPosition)) { + AABox box = _audioZones[_zoneReverbSettings[i].zone]; + if (box.contains(streamPosition)) { hasReverb = true; reverbTime = _zoneReverbSettings[i].reverbTime; wetLevel = _zoneReverbSettings[i].wetLevel; + + // Modulate wet level with distance to wall + float MIN_ATTENUATION_DISTANCE = 2.0f; + float MAX_ATTENUATION = -12; // dB + glm::vec3 distanceToWalls = (box.getDimensions() / 2.0f) - glm::abs(streamPosition - box.calcCenter()); + float distanceToClosestWall = 
glm::min(distanceToWalls.x, distanceToWalls.z); + if (distanceToClosestWall < MIN_ATTENUATION_DISTANCE) { + wetLevel += MAX_ATTENUATION * (1.0f - distanceToClosestWall / MIN_ATTENUATION_DISTANCE); + } break; } } diff --git a/cmake/externals/gverb/CMakeLists.txt b/cmake/externals/gverb/CMakeLists.txt index 972b5b602f..c62284a133 100644 --- a/cmake/externals/gverb/CMakeLists.txt +++ b/cmake/externals/gverb/CMakeLists.txt @@ -7,7 +7,7 @@ endif () include(ExternalProject) ExternalProject_Add( ${EXTERNAL_NAME} - GIT_REPOSITORY https://github.com/birarda/gverb.git + URL http://hifi-public.s3.amazonaws.com/dependencies/gverb-master.zip CMAKE_ARGS ${ANDROID_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX:PATH= LOG_DOWNLOAD ON ) diff --git a/examples/controllers/oculus/goTo.js b/examples/controllers/oculus/goTo.js index 91ae8e6141..5a5c3429ed 100644 --- a/examples/controllers/oculus/goTo.js +++ b/examples/controllers/oculus/goTo.js @@ -18,6 +18,7 @@ Script.include("../../libraries/globals.js"); Script.include("../../libraries/virtualKeyboard.js"); +Script.include("../../libraries/soundArray.js"); const MAX_SHOW_INSTRUCTION_TIMES = 2; const INSTRUCTIONS_SETTING = "GoToInstructionsShowCounter" @@ -82,6 +83,12 @@ var textFontSize = 9; var text = null; var locationURL = ""; +var randomSounds = new SoundArray({}, true); +var numberOfSounds = 7; +for (var i = 1; i <= numberOfSounds; i++) { + randomSounds.addSound(HIFI_PUBLIC_BUCKET + "sounds/UI/virtualKeyboard-press" + i + ".raw"); +} + function appendChar(char) { locationURL += char; updateTextOverlay(); @@ -107,6 +114,7 @@ function updateTextOverlay() { } keyboard.onKeyPress = function(event) { + randomSounds.playRandom(); if (event.event == 'keypress') { appendChar(event.char); } diff --git a/examples/controllers/oculus/virtualKeyboardTextEntityExample.js b/examples/controllers/oculus/virtualKeyboardTextEntityExample.js index c3cb3c6316..cf36fdbffb 100644 --- a/examples/controllers/oculus/virtualKeyboardTextEntityExample.js +++ 
b/examples/controllers/oculus/virtualKeyboardTextEntityExample.js @@ -17,6 +17,7 @@ Script.include("../../libraries/globals.js"); Script.include("../../libraries/virtualKeyboard.js"); +Script.include("../../libraries/soundArray.js"); const SPAWN_DISTANCE = 1; const DEFAULT_TEXT_DIMENSION_Z = 0.02; @@ -34,6 +35,12 @@ var text = null; var textText = ""; var textSizeMeasureOverlay = Overlays.addOverlay("text3d", {visible: false}); +var randomSounds = new SoundArray({}, true); +var numberOfSounds = 7; +for (var i = 1; i <= numberOfSounds; i++) { + randomSounds.addSound(HIFI_PUBLIC_BUCKET + "sounds/UI/virtualKeyboard-press" + i + ".raw"); +} + function appendChar(char) { textText += char; updateTextOverlay(); @@ -58,6 +65,7 @@ function updateTextOverlay() { } keyboard.onKeyPress = function(event) { + randomSounds.playRandom(); if (event.event == 'keypress') { appendChar(event.char); } else if (event.event == 'enter') { diff --git a/examples/libraries/soundArray.js b/examples/libraries/soundArray.js new file mode 100644 index 0000000000..813621fb4b --- /dev/null +++ b/examples/libraries/soundArray.js @@ -0,0 +1,42 @@ +/** + * An array for sounds, allows you to randomly play a sound + * taken from the removed editVoxels.js + */ +SoundArray = function(audioOptions, autoUpdateAudioPosition) { + this.audioOptions = audioOptions !== undefined ? audioOptions : {}; + this.autoUpdateAudioPosition = autoUpdateAudioPosition !== undefined ? 
autoUpdateAudioPosition : false; + if (this.audioOptions.position === undefined) { + this.audioOptions.position = Vec3.sum(MyAvatar.position, { x: 0, y: 1, z: 0}); + } + if (this.audioOptions.volume === undefined) { + this.audioOptions.volume = 1.0; + } + this.sounds = new Array(); + this.addSound = function (soundURL) { + this.sounds[this.sounds.length] = SoundCache.getSound(soundURL); + }; + this.play = function (index) { + if (0 <= index && index < this.sounds.length) { + if (this.autoUpdateAudioPosition) { + this.updateAudioPosition(); + } + if (this.sounds[index].downloaded) { + Audio.playSound(this.sounds[index], this.audioOptions); + } + } else { + print("[ERROR] libraries/soundArray.js:play() : Index " + index + " out of range."); + } + }; + this.playRandom = function () { + if (this.sounds.length > 0) { + this.play(Math.floor(Math.random() * this.sounds.length)); + } else { + print("[ERROR] libraries/soundArray.js:playRandom() : Array is empty."); + } + }; + this.updateAudioPosition = function() { + var position = MyAvatar.position; + var forwardVector = Quat.getFront(MyAvatar.orientation); + this.audioOptions.position = Vec3.sum(position, forwardVector); + }; +}; diff --git a/examples/notifications.js b/examples/notifications.js index 1b512634d7..287bfd0b36 100644 --- a/examples/notifications.js +++ b/examples/notifications.js @@ -1,13 +1,13 @@ -// -// notifications.js -// Version 0.801 -// Created by Adrian +// +// notifications.js +// Version 0.801 +// Created by Adrian // // Adrian McCarlie 8-10-14 // This script demonstrates on-screen overlay type notifications. // Copyright 2014 High Fidelity, Inc. // -// +// // Distributed under the Apache License, Version 2.0. // See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html @@ -20,29 +20,29 @@ // CTRL/m for mic mute and unmute. // System generated notifications: -// Displays users online at startup. +// Displays users online at startup. // If Screen is resized. 
// Triggers notification if @MyUserName is mentioned in chat. // Announces existing user logging out. // Announces new user logging in. // If mic is muted for any reason. -// +// // To add a new System notification type: // -// 1. Set the Event Connector at the bottom of the script. -// example: +// 1. Set the Event Connector at the bottom of the script. +// example: // GlobalServices.incomingMessage.connect(onIncomingMessage); // -// 2. Create a new function to produce a text string, do not include new line returns. +// 2. Create a new function to produce a text string, do not include new line returns. // example: // function onIncomingMessage(user, message) { -// //do stuff here; +// //do stuff here; // var text = "This is a notification"; // wordWrap(text); // } // // This new function must call wordWrap(text) if the length of message is longer than 42 chars or unknown. -// wordWrap() will format the text to fit the notifications overlay and send it to createNotification(text). +// wordWrap() will format the text to fit the notifications overlay and send it to createNotification(text). // If the message is 42 chars or less you should bypass wordWrap() and call createNotification() directly. 
@@ -57,6 +57,8 @@ // var welcome = "There are " + GlobalServices.onlineUsers.length + " users online now."; // createNotification(welcome); // } +Script.include("./libraries/globals.js"); +Script.include("./libraries/soundArray.js"); var width = 340.0; //width of notification overlay var windowDimensions = Controller.getViewportDimensions(); // get the size of the interface window @@ -66,7 +68,7 @@ var locationY = 20.0; // position down from top of interface window var topMargin = 13.0; var leftMargin = 10.0; var textColor = { red: 228, green: 228, blue: 228}; // text color -var backColor = { red: 2, green: 2, blue: 2}; // background color was 38,38,38 +var backColor = { red: 2, green: 2, blue: 2}; // background color was 38,38,38 var backgroundAlpha = 0; var fontSize = 12.0; var PERSIST_TIME_2D = 10.0; // Time in seconds before notification fades @@ -81,6 +83,22 @@ var last_users = GlobalServices.onlineUsers; var users = []; var ctrlIsPressed = false; var ready = true; + +var randomSounds = new SoundArray({}, true); +var numberOfSounds = 2; +for (var i = 1; i <= numberOfSounds; i++) { + randomSounds.addSound(HIFI_PUBLIC_BUCKET + "sounds/UI/notification-general" + i + ".raw"); +} + +// When our script shuts down, we should clean up all of our overlays +function scriptEnding() { + for (i = 0; i < notifications.length; i++) { + Overlays.deleteOverlay(notifications[i]); + Overlays.deleteOverlay(buttons[i]); + } +} +Script.scriptEnding.connect(scriptEnding); + var notifications = []; var buttons = []; var times = []; @@ -193,6 +211,8 @@ function notify(notice, button, height) { positions, last; + randomSounds.playRandom(); + if (isOnHMD) { // Calculate 3D values from 2D overlay properties. 
@@ -454,7 +474,6 @@ function onOnlineUsersChanged(users) { if (last_users.indexOf(users[i]) === -1.0) { createNotification(users[i] + " has joined"); } - } for (i = 0; i < last_users.length; i += 1) { diff --git a/libraries/audio-client/src/AudioClient.cpp b/libraries/audio-client/src/AudioClient.cpp index b427d5ba42..e1410d3304 100644 --- a/libraries/audio-client/src/AudioClient.cpp +++ b/libraries/audio-client/src/AudioClient.cpp @@ -109,7 +109,6 @@ AudioClient::AudioClient() : _audioSourceInjectEnabled(false), _reverb(false), _reverbOptions(&_scriptReverbOptions), - _gverbLocal(NULL), _gverb(NULL), _inputToNetworkResampler(NULL), _networkToOutputResampler(NULL), @@ -126,26 +125,23 @@ AudioClient::AudioClient() : connect(&_receivedAudioStream, &MixedProcessedAudioStream::processSamples, this, &AudioClient::processReceivedSamples, Qt::DirectConnection); - - // Initialize GVerb - initGverb(); - - const qint64 DEVICE_CHECK_INTERVAL_MSECS = 2 * 1000; _inputDevices = getDeviceNames(QAudio::AudioInput); _outputDevices = getDeviceNames(QAudio::AudioOutput); + const qint64 DEVICE_CHECK_INTERVAL_MSECS = 2 * 1000; QTimer* updateTimer = new QTimer(this); connect(updateTimer, &QTimer::timeout, this, &AudioClient::checkDevices); updateTimer->start(DEVICE_CHECK_INTERVAL_MSECS); + + // create GVerb filter + _gverb = createGverbFilter(); + configureGverbFilter(_gverb); } AudioClient::~AudioClient() { stop(); - if (_gverbLocal) { - gverb_free(_gverbLocal); - } if (_gverb) { gverb_free(_gverb); } @@ -158,6 +154,8 @@ void AudioClient::reset() { _toneSource.reset(); _sourceGain.reset(); _inputGain.reset(); + + gverb_flush(_gverb); } void AudioClient::audioMixerKilled() { @@ -491,8 +489,8 @@ void AudioClient::start() { _sourceGain.initialize(); _noiseSource.initialize(); _toneSource.initialize(); - _sourceGain.setParameters(0.25f,0.0f); - _inputGain.setParameters(1.0f,0.0f); + _sourceGain.setParameters(0.25f, 0.0f); + _inputGain.setParameters(1.0f, 0.0f); } void AudioClient::stop() { 
@@ -535,38 +533,24 @@ bool AudioClient::switchOutputToAudioDevice(const QString& outputDeviceName) { return switchOutputToAudioDevice(getNamedAudioDeviceForMode(QAudio::AudioOutput, outputDeviceName)); } -void AudioClient::initGverb() { +ty_gverb* AudioClient::createGverbFilter() { // Initialize a new gverb instance - if (_gverbLocal) { - gverb_free(_gverbLocal); - } - _gverbLocal = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(), - _reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(), - _reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(), - _reverbOptions->getTailLevel()); - - if (_gverb) { - gverb_free(_gverb); - } - _gverb = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(), - _reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(), - _reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(), - _reverbOptions->getTailLevel()); + ty_gverb* filter = gverb_new(_outputFormat.sampleRate(), _reverbOptions->getMaxRoomSize(), _reverbOptions->getRoomSize(), + _reverbOptions->getReverbTime(), _reverbOptions->getDamping(), _reverbOptions->getSpread(), + _reverbOptions->getInputBandwidth(), _reverbOptions->getEarlyLevel(), + _reverbOptions->getTailLevel()); + return filter; +} + +void AudioClient::configureGverbFilter(ty_gverb* filter) { // Configure the instance (these functions are not super well named - they actually set several internal variables) - gverb_set_roomsize(_gverbLocal, _reverbOptions->getRoomSize()); - gverb_set_revtime(_gverbLocal, _reverbOptions->getReverbTime()); - gverb_set_damping(_gverbLocal, _reverbOptions->getDamping()); - gverb_set_inputbandwidth(_gverbLocal, _reverbOptions->getInputBandwidth()); - gverb_set_earlylevel(_gverbLocal, DB_CO(_reverbOptions->getEarlyLevel())); - gverb_set_taillevel(_gverbLocal, DB_CO(_reverbOptions->getTailLevel())); 
- - gverb_set_roomsize(_gverb, _reverbOptions->getRoomSize()); - gverb_set_revtime(_gverb, _reverbOptions->getReverbTime()); - gverb_set_damping(_gverb, _reverbOptions->getDamping()); - gverb_set_inputbandwidth(_gverb, _reverbOptions->getInputBandwidth()); - gverb_set_earlylevel(_gverb, DB_CO(_reverbOptions->getEarlyLevel())); - gverb_set_taillevel(_gverb, DB_CO(_reverbOptions->getTailLevel())); + gverb_set_roomsize(filter, _reverbOptions->getRoomSize()); + gverb_set_revtime(filter, _reverbOptions->getReverbTime()); + gverb_set_damping(filter, _reverbOptions->getDamping()); + gverb_set_inputbandwidth(filter, _reverbOptions->getInputBandwidth()); + gverb_set_earlylevel(filter, DB_CO(_reverbOptions->getEarlyLevel())); + gverb_set_taillevel(filter, DB_CO(_reverbOptions->getTailLevel())); } void AudioClient::updateGverbOptions() { @@ -579,7 +563,7 @@ void AudioClient::updateGverbOptions() { } if (_zoneReverbOptions.getWetLevel() != _receivedAudioStream.getWetLevel()) { _zoneReverbOptions.setWetLevel(_receivedAudioStream.getWetLevel()); - reverbChanged = true; + // Not part of actual filter config, no need to set reverbChanged to true } if (_reverbOptions != &_zoneReverbOptions) { @@ -592,7 +576,17 @@ void AudioClient::updateGverbOptions() { } if (reverbChanged) { - initGverb(); + gverb_free(_gverb); + _gverb = createGverbFilter(); + configureGverbFilter(_gverb); + } +} + +void AudioClient::setReverb(bool reverb) { + _reverb = reverb; + + if (!_reverb) { + gverb_flush(_gverb); } } @@ -611,14 +605,17 @@ void AudioClient::setReverbOptions(const AudioEffectOptions* options) { _scriptReverbOptions.setWetLevel(options->getWetLevel()); if (_reverbOptions == &_scriptReverbOptions) { - // Apply them to the reverb instance(s) - initGverb(); + // Apply them to the reverb instances + gverb_free(_gverb); + _gverb = createGverbFilter(); + configureGverbFilter(_gverb); } } -void AudioClient::addReverb(ty_gverb* gverb, int16_t* samplesData, int numSamples, QAudioFormat& audioFormat, 
bool noEcho) { +void AudioClient::addReverb(ty_gverb* gverb, int16_t* samplesData, int16_t* reverbAlone, int numSamples, + QAudioFormat& audioFormat, bool noEcho) { float wetFraction = DB_CO(_reverbOptions->getWetLevel()); - float dryFraction = (noEcho) ? 0.0f : (1.0f - wetFraction); + float dryFraction = 1.0f - wetFraction; float lValue,rValue; for (int sample = 0; sample < numSamples; sample += audioFormat.channelCount()) { @@ -633,11 +630,19 @@ int lResult = glm::clamp((int)(samplesData[j] * dryFraction + lValue * wetFraction), AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE); samplesData[j] = (int16_t)lResult; + + if (noEcho) { + reverbAlone[j] = (int16_t)(lValue * wetFraction); + } } else if (j == (sample + 1)) { // right channel int rResult = glm::clamp((int)(samplesData[j] * dryFraction + rValue * wetFraction), AudioConstants::MIN_SAMPLE_VALUE, AudioConstants::MAX_SAMPLE_VALUE); samplesData[j] = (int16_t)rResult; + + if (noEcho) { + reverbAlone[j] = (int16_t)(rValue * wetFraction); + } } else { // ignore channels above 2 } @@ -647,9 +652,8 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) { // If there is server echo, reverb will be applied to the recieved audio stream so no need to have it here. 
- bool hasLocalReverb = (_reverb || _receivedAudioStream.hasReverb()) && - !_shouldEchoToServer; - if (_muted || !_audioOutput || (!_shouldEchoLocally && !hasLocalReverb)) { + bool hasReverb = _reverb || _receivedAudioStream.hasReverb(); + if (_muted || !_audioOutput || (!_shouldEchoLocally && !hasReverb)) { return; } @@ -659,6 +663,10 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) { if (!_loopbackOutputDevice && _loopbackAudioOutput) { // we didn't have the loopback output device going so set that up now _loopbackOutputDevice = _loopbackAudioOutput->start(); + + if (!_loopbackOutputDevice) { + return; + } } // do we need to setup a resampler? @@ -671,26 +679,31 @@ void AudioClient::handleLocalEchoAndReverb(QByteArray& inputByteArray) { } } + static QByteArray reverbAlone; // Intermediary for local reverb with no echo static QByteArray loopBackByteArray; - loopBackByteArray.resize(numDestinationSamplesRequired(_inputFormat, _outputFormat, - inputByteArray.size() / sizeof(int16_t)) * sizeof(int16_t)); + + int numInputSamples = inputByteArray.size() / sizeof(int16_t); + int numLoopbackSamples = numDestinationSamplesRequired(_inputFormat, _outputFormat, numInputSamples); + + reverbAlone.resize(numInputSamples * sizeof(int16_t)); + loopBackByteArray.resize(numLoopbackSamples * sizeof(int16_t)); + + int16_t* inputSamples = reinterpret_cast(inputByteArray.data()); + int16_t* reverbAloneSamples = reinterpret_cast(reverbAlone.data()); + int16_t* loopbackSamples = reinterpret_cast(loopBackByteArray.data()); + + if (hasReverb) { + updateGverbOptions(); + addReverb(_gverb, inputSamples, reverbAloneSamples, numInputSamples, + _inputFormat, !_shouldEchoLocally); + } possibleResampling(_loopbackResampler, - reinterpret_cast(inputByteArray.data()), - reinterpret_cast(loopBackByteArray.data()), - inputByteArray.size() / sizeof(int16_t), loopBackByteArray.size() / sizeof(int16_t), + (_shouldEchoLocally) ? 
inputSamples : reverbAloneSamples, loopbackSamples, + numInputSamples, numLoopbackSamples, _inputFormat, _outputFormat); - if (hasLocalReverb) { - int16_t* loopbackSamples = reinterpret_cast(loopBackByteArray.data()); - int numLoopbackSamples = loopBackByteArray.size() / sizeof(int16_t); - updateGverbOptions(); - addReverb(_gverbLocal, loopbackSamples, numLoopbackSamples, _outputFormat, !_shouldEchoLocally); - } - - if (_loopbackOutputDevice) { - _loopbackOutputDevice->write(loopBackByteArray); - } + _loopbackOutputDevice->write(loopBackByteArray); } void AudioClient::handleAudioInput() { @@ -884,11 +897,6 @@ void AudioClient::processReceivedSamples(const QByteArray& inputBuffer, QByteArr reinterpret_cast(outputBuffer.data()), numNetworkOutputSamples, numDeviceOutputSamples, _desiredOutputFormat, _outputFormat); - - if(_reverb || _receivedAudioStream.hasReverb()) { - updateGverbOptions(); - addReverb(_gverb, (int16_t*)outputBuffer.data(), numDeviceOutputSamples, _outputFormat); - } } void AudioClient::sendMuteEnvironmentPacket() { diff --git a/libraries/audio-client/src/AudioClient.h b/libraries/audio-client/src/AudioClient.h index 0095a885eb..f75843df5f 100644 --- a/libraries/audio-client/src/AudioClient.h +++ b/libraries/audio-client/src/AudioClient.h @@ -166,7 +166,7 @@ public slots: float getInputVolume() const { return (_audioInput) ? 
_audioInput->volume() : 0.0f; } void setInputVolume(float volume) { if (_audioInput) _audioInput->setVolume(volume); } - void setReverb(bool reverb) { _reverb = reverb; } + void setReverb(bool reverb); void setReverbOptions(const AudioEffectOptions* options); void outputNotify(); @@ -241,7 +241,6 @@ private: AudioEffectOptions _scriptReverbOptions; AudioEffectOptions _zoneReverbOptions; AudioEffectOptions* _reverbOptions; - ty_gverb* _gverbLocal; ty_gverb* _gverb; // possible soxr streams needed for resample @@ -250,9 +249,10 @@ private: soxr* _loopbackResampler; // Adds Reverb - void initGverb(); + ty_gverb* createGverbFilter(); + void configureGverbFilter(ty_gverb* filter); void updateGverbOptions(); - void addReverb(ty_gverb* gverb, int16_t* samples, int numSamples, QAudioFormat& format, bool noEcho = false); + void addReverb(ty_gverb* gverb, int16_t* samples, int16_t* reverbAlone, int numSamples, QAudioFormat& format, bool noEcho = false); void handleLocalEchoAndReverb(QByteArray& inputByteArray);