diff --git a/examples/audioReflectorTools.js b/examples/audioReflectorTools.js index 3cc6a1a21e..76869de578 100644 --- a/examples/audioReflectorTools.js +++ b/examples/audioReflectorTools.js @@ -19,6 +19,8 @@ var reflectiveScale = 100.0; var diffusionScale = 100.0; var absorptionScale = 100.0; var combFilterScale = 50.0; +var originalScale = 2.0; +var echoesScale = 2.0; // these three properties are bound together, if you change one, the others will also change var reflectiveRatio = AudioReflector.getReflectiveRatio(); @@ -421,6 +423,84 @@ var absorptionThumb = Overlays.addOverlay("image", { alpha: 1 }); +var originalY = topY; +topY += sliderHeight; + +var originalLabel = Overlays.addOverlay("text", { + x: 40, + y: originalY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Original\nMix:" + }); + + +var originalSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: originalY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var originalMinThumbX = 110; +var originalMaxThumbX = originalMinThumbX + 110; +var originalThumbX = originalMinThumbX + ((originalMaxThumbX - originalMinThumbX) * (AudioReflector.getOriginalSourceAttenuation() / originalScale)); +var originalThumb = Overlays.addOverlay("image", { + x: originalThumbX, + y: originalY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 128, green: 128, blue: 0}, + alpha: 1 + }); + +var echoesY = topY; +topY += sliderHeight; + +var echoesLabel = Overlays.addOverlay("text", { + x: 40, + y: echoesY, + width: 60, + height: sliderHeight, + color: { red: 0, green: 0, blue: 0}, + textColor: { red: 255, green: 255, blue: 255}, + topMargin: 6, + leftMargin: 5, + text: "Echoes\nMix:" + }); + + +var echoesSlider = Overlays.addOverlay("image", { + // alternate form of expressing bounds + bounds: { x: 100, y: echoesY, width: 150, height: sliderHeight}, + subImage: { x: 46, y: 0, width: 200, height: 71 }, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/slider.png", + color: { red: 255, green: 255, blue: 255}, + alpha: 1 + }); + + +var echoesMinThumbX = 110; +var echoesMaxThumbX = echoesMinThumbX + 110; +var echoesThumbX = echoesMinThumbX + ((echoesMaxThumbX - echoesMinThumbX) * (AudioReflector.getEchoesAttenuation() / echoesScale)); +var echoesThumb = Overlays.addOverlay("image", { + x: echoesThumbX, + y: echoesY+9, + width: 18, + height: 17, + imageURL: "https://s3-us-west-1.amazonaws.com/highfidelity-public/images/thumb.png", + color: { red: 128, green: 128, blue: 0}, + alpha: 1 + }); + // When our script shuts down, we should clean up all of our overlays function scriptEnding() { @@ -460,6 +540,14 @@ function scriptEnding() { Overlays.deleteOverlay(absorptionThumb); Overlays.deleteOverlay(absorptionSlider); + Overlays.deleteOverlay(echoesLabel); + Overlays.deleteOverlay(echoesThumb); + Overlays.deleteOverlay(echoesSlider); + + Overlays.deleteOverlay(originalLabel); + Overlays.deleteOverlay(originalThumb); + Overlays.deleteOverlay(originalSlider); + } Script.scriptEnding.connect(scriptEnding); @@ -483,6 +571,8 @@ var movingSliderLocalFactor = false; var movingSliderReflective = false; var 
movingSliderDiffusion = false; var movingSliderAbsorption = false; +var movingSliderOriginal = false; +var movingSliderEchoes = false; var thumbClickOffsetX = 0; function mouseMoveEvent(event) { @@ -546,7 +636,6 @@ function mouseMoveEvent(event) { var combFilter = ((newThumbX - combFilterMinThumbX) / (combFilterMaxThumbX - combFilterMinThumbX)) * combFilterScale; AudioReflector.setCombFilterWindow(combFilter); } - if (movingSliderLocalFactor) { newThumbX = event.x - thumbClickOffsetX; if (newThumbX < localFactorMinThumbX) { @@ -598,6 +687,30 @@ function mouseMoveEvent(event) { var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale; setDiffusionRatio(diffusion); } + if (movingSliderEchoes) { + newThumbX = event.x - thumbClickOffsetX; + if (newThumbX < echoesMinThumbX) { + newThumbX = echoesMinThumbX; + } + if (newThumbX > echoesMaxThumbX) { + newThumbX = echoesMaxThumbX; + } + Overlays.editOverlay(echoesThumb, { x: newThumbX } ); + var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale; + AudioReflector.setEchoesAttenuation(echoes); + } + if (movingSliderOriginal) { + newThumbX = event.x - thumbClickOffsetX; + if (newThumbX < originalMinThumbX) { + newThumbX = originalMinThumbX; + } + if (newThumbX > originalMaxThumbX) { + newThumbX = originalMaxThumbX; + } + Overlays.editOverlay(originalThumb, { x: newThumbX } ); + var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale; + AudioReflector.setOriginalSourceAttenuation(original); + } } @@ -640,7 +753,16 @@ function mousePressEvent(event) { movingSliderReflective = true; thumbClickOffsetX = event.x - reflectiveThumbX; } + if (clickedOverlay == originalThumb) { + movingSliderOriginal = true; + thumbClickOffsetX = event.x - originalThumbX; + } + if (clickedOverlay == echoesThumb) { + movingSliderEchoes = true; + thumbClickOffsetX = event.x - echoesThumbX; + } } + function mouseReleaseEvent(event) { if (movingSliderDelay) { movingSliderDelay = false; @@ -672,14 +794,12 @@ function mouseReleaseEvent(event) { AudioReflector.setCombFilterWindow(combFilter); combFilterThumbX = newThumbX; } - if (movingSliderLocalFactor) { movingSliderLocalFactor = false; var localFactor = ((newThumbX - localFactorMinThumbX) / (localFactorMaxThumbX - localFactorMinThumbX)) * localFactorScale; AudioReflector.setLocalAudioAttenuationFactor(localFactor); localFactorThumbX = newThumbX; } - if (movingSliderReflective) { movingSliderReflective = false; var reflective = ((newThumbX - reflectiveMinThumbX) / (reflectiveMaxThumbX - reflectiveMinThumbX)) * reflectiveScale; @@ -687,7 +807,6 @@ function mouseReleaseEvent(event) { reflectiveThumbX = newThumbX; updateRatioSliders(); } - if (movingSliderDiffusion) { movingSliderDiffusion = false; var diffusion = ((newThumbX - diffusionMinThumbX) / (diffusionMaxThumbX - diffusionMinThumbX)) * diffusionScale; @@ -695,7 +814,6 @@ function mouseReleaseEvent(event) { diffusionThumbX = newThumbX; updateRatioSliders(); } - if (movingSliderAbsorption) { movingSliderAbsorption = false; var absorption = ((newThumbX - absorptionMinThumbX) / (absorptionMaxThumbX - absorptionMinThumbX)) * absorptionScale; @@ -703,6 +821,18 @@ function mouseReleaseEvent(event) { absorptionThumbX = newThumbX; updateRatioSliders(); } + if (movingSliderEchoes) { + movingSliderEchoes = false; + var echoes = ((newThumbX - echoesMinThumbX) / (echoesMaxThumbX - echoesMinThumbX)) * echoesScale; + 
AudioReflector.setEchoesAttenuation(echoes); + echoesThumbX = newThumbX; + } + if (movingSliderOriginal) { + movingSliderOriginal = false; + var original = ((newThumbX - originalMinThumbX) / (originalMaxThumbX - originalMinThumbX)) * originalScale; + AudioReflector.setOriginalSourceAttenuation(original); + originalThumbX = newThumbX; + } } Controller.mouseMoveEvent.connect(mouseMoveEvent); diff --git a/interface/src/Application.cpp b/interface/src/Application.cpp index bd7a82b439..7480b16334 100644 --- a/interface/src/Application.cpp +++ b/interface/src/Application.cpp @@ -1679,8 +1679,12 @@ void Application::init() { _audioReflector.setMyAvatar(getAvatar()); _audioReflector.setVoxels(_voxels.getTree()); _audioReflector.setAudio(getAudio()); + _audioReflector.setAvatarManager(&_avatarManager); + connect(getAudio(), &Audio::processInboundAudio, &_audioReflector, &AudioReflector::processInboundAudio,Qt::DirectConnection); connect(getAudio(), &Audio::processLocalAudio, &_audioReflector, &AudioReflector::processLocalAudio,Qt::DirectConnection); + connect(getAudio(), &Audio::preProcessOriginalInboundAudio, &_audioReflector, + &AudioReflector::preProcessOriginalInboundAudio,Qt::DirectConnection); // save settings when avatar changes connect(_myAvatar, &MyAvatar::transformChanged, this, &Application::bumpSettings); diff --git a/interface/src/Audio.cpp b/interface/src/Audio.cpp index daa7c036eb..830e2fe69b 100644 --- a/interface/src/Audio.cpp +++ b/interface/src/Audio.cpp @@ -784,6 +784,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) { _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); // Accumulate direct transmission of audio from sender to receiver if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) { + emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat); addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples); } diff --git a/interface/src/Audio.h b/interface/src/Audio.h index 3b19d98146..96def43dd2 100644 --- a/interface/src/Audio.h +++ b/interface/src/Audio.h @@ -99,6 +99,7 @@ public slots: signals: bool muteToggled(); + void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format); void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); diff --git a/interface/src/AudioReflector.cpp b/interface/src/AudioReflector.cpp index 52558f1d59..67b6120354 100644 --- a/interface/src/AudioReflector.cpp +++ b/interface/src/AudioReflector.cpp @@ -25,6 +25,8 @@ const float SLIGHTLY_SHORT = 0.999f; // slightly inside the distance so we're on const float DEFAULT_ABSORPTION_RATIO = 0.125; // 12.5% is absorbed const float DEFAULT_DIFFUSION_RATIO = 0.125; // 12.5% is diffused +const float DEFAULT_ORIGINAL_ATTENUATION = 1.0f; +const float DEFAULT_ECHO_ATTENUATION = 1.0f; AudioReflector::AudioReflector(QObject* parent) : QObject(parent), @@ -36,6 +38,8 @@ AudioReflector::AudioReflector(QObject* parent) : _diffusionFanout(DEFAULT_DIFFUSION_FANOUT), _absorptionRatio(DEFAULT_ABSORPTION_RATIO), _diffusionRatio(DEFAULT_DIFFUSION_RATIO), + _originalSourceAttenuation(DEFAULT_ORIGINAL_ATTENUATION), + _allEchoesAttenuation(DEFAULT_ECHO_ATTENUATION), _withDiffusion(false), _lastPreDelay(DEFAULT_PRE_DELAY), _lastSoundMsPerMeter(DEFAULT_MS_DELAY_PER_METER), @@ -43,20 +47,29 @@ 
AudioReflector::AudioReflector(QObject* parent) : _lastLocalAudioAttenuationFactor(DEFAULT_LOCAL_ATTENUATION_FACTOR), _lastDiffusionFanout(DEFAULT_DIFFUSION_FANOUT), _lastAbsorptionRatio(DEFAULT_ABSORPTION_RATIO), - _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO) + _lastDiffusionRatio(DEFAULT_DIFFUSION_RATIO), + _lastDontDistanceAttenuate(false), + _lastAlternateDistanceAttenuate(false) { _reflections = 0; _diffusionPathCount = 0; - _averageAttenuation = 0.0f; - _maxAttenuation = 0.0f; - _minAttenuation = 0.0f; - _averageDelay = 0; - _maxDelay = 0; - _minDelay = 0; + _averageAttenuationOfficial = _averageAttenuation = 0.0f; + _maxAttenuationOfficial = _maxAttenuation = 0.0f; + _minAttenuationOfficial = _minAttenuation = 0.0f; + _averageDelayOfficial = _averageDelay = 0; + _maxDelayOfficial = _maxDelay = 0; + _minDelayOfficial = _minDelay = 0; + _inboundEchoesCount = 0; + _inboundEchoesSuppressedCount = 0; + _localEchoesCount = 0; + _localEchoesSuppressedCount = 0; } bool AudioReflector::haveAttributesChanged() { bool withDiffusion = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingWithDiffusions); + bool dontDistanceAttenuate = Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingDontDistanceAttenuate); + bool alternateDistanceAttenuate = Menu::getInstance()->isOptionChecked( + MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate); bool attributesChange = (_withDiffusion != withDiffusion || _lastPreDelay != _preDelay @@ -64,7 +77,9 @@ bool AudioReflector::haveAttributesChanged() { || _lastDistanceAttenuationScalingFactor != _distanceAttenuationScalingFactor || _lastDiffusionFanout != _diffusionFanout || _lastAbsorptionRatio != _absorptionRatio - || _lastDiffusionRatio != _diffusionRatio); + || _lastDiffusionRatio != _diffusionRatio + || _lastDontDistanceAttenuate != dontDistanceAttenuate + || _lastAlternateDistanceAttenuate != alternateDistanceAttenuate); if (attributesChange) { _withDiffusion = withDiffusion; @@ -74,6 +89,8 @@ bool AudioReflector::haveAttributesChanged() { _lastDiffusionFanout = _diffusionFanout; _lastAbsorptionRatio = _absorptionRatio; _lastDiffusionRatio = _diffusionRatio; + _lastDontDistanceAttenuate = dontDistanceAttenuate; + _lastAlternateDistanceAttenuate = alternateDistanceAttenuate; } return attributesChange; @@ -107,19 +124,47 @@ float AudioReflector::getDelayFromDistance(float distance) { // attenuation = from the Audio Mixer float AudioReflector::getDistanceAttenuationCoefficient(float distance) { - const float DISTANCE_SCALE = 2.5f; - const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; - const float DISTANCE_LOG_BASE = 2.5f; - const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE); + + + bool doDistanceAttenuation = !Menu::getInstance()->isOptionChecked( + MenuOption::AudioSpatialProcessingDontDistanceAttenuate); + + bool originalFormula = !Menu::getInstance()->isOptionChecked( + MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate); - float distanceSquareToSource = distance * distance; + + float distanceCoefficient = 1.0f; + + if (doDistanceAttenuation) { + + if (originalFormula) { + const float DISTANCE_SCALE = 2.5f; + const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f; + const float DISTANCE_LOG_BASE = 2.5f; + const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE); + + float distanceSquareToSource = distance * distance; - // calculate the distance coefficient using the distance to this node - float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR, - 
DISTANCE_SCALE_LOG + - (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1); - - distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor()); + // calculate the distance coefficient using the distance to this node + distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR, + DISTANCE_SCALE_LOG + + (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1); + distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor()); + } else { + + // From Fred: If we wanted something that would produce a tail that could go up to 5 seconds in a + // really big room, that would suggest the sound still has to be in the audible after traveling about + // 1500 meters. If it’s a sound of average volume, we probably have about 30 db, or 5 base2 orders + // of magnitude we can drop down before the sound becomes inaudible. (That’s approximate headroom + // based on a few sloppy assumptions.) So we could try a factor like 1 / (2^(D/300)) for starters. + // 1 / (2^(D/300)) + const float DISTANCE_BASE = 2.0f; + const float DISTANCE_DENOMINATOR = 300.0f; + const float DISTANCE_NUMERATOR = 300.0f; + distanceCoefficient = DISTANCE_NUMERATOR / powf(DISTANCE_BASE, (distance / DISTANCE_DENOMINATOR )); + distanceCoefficient = std::min(1.0f, distanceCoefficient * getDistanceAttenuationScalingFactor()); + } + } return distanceCoefficient; } @@ -236,11 +281,13 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& rightSample = originalSamplesData[(sample * NUMBER_OF_CHANNELS) + 1]; } - attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = leftSample * leftEarAttenuation; + attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS] = + leftSample * leftEarAttenuation * _allEchoesAttenuation; attenuatedLeftSamplesData[sample * NUMBER_OF_CHANNELS + 1] = 0; attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS] = 0; - attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = rightSample * rightEarAttenuation; + attenuatedRightSamplesData[sample * NUMBER_OF_CHANNELS + 1] = + rightSample * rightEarAttenuation * _allEchoesAttenuation; } // now inject the attenuated array with the appropriate delay @@ -249,9 +296,25 @@ void AudioReflector::injectAudiblePoint(AudioSource source, const AudiblePoint& _audio->addSpatialAudioToBuffer(sampleTimeLeft, attenuatedLeftSamples, totalNumberOfSamples); _audio->addSpatialAudioToBuffer(sampleTimeRight, attenuatedRightSamples, totalNumberOfSamples); + + _injectedEchoes++; } } + +void AudioReflector::preProcessOriginalInboundAudio(unsigned int sampleTime, + QByteArray& samples, const QAudioFormat& format) { + + if (_originalSourceAttenuation != 1.0f) { + int numberOfSamples = (samples.size() / sizeof(int16_t)); + int16_t* sampleData = (int16_t*)samples.data(); + for (int i = 0; i < numberOfSamples; i++) { + sampleData[i] = sampleData[i] * _originalSourceAttenuation; + } + } + +} + void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)) { const int NUM_CHANNELS_INPUT = 1; @@ -272,6 +335,8 @@ void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray _localAudioDelays.clear(); _localEchoesSuppressed.clear(); echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat); + _localEchoesCount = _localAudioDelays.size(); + _localEchoesSuppressedCount = _localEchoesSuppressed.size(); } } } @@ 
-280,9 +345,13 @@ void AudioReflector::processInboundAudio(unsigned int sampleTime, const QByteArr _inboundAudioDelays.clear(); _inboundEchoesSuppressed.clear(); echoAudio(INBOUND_AUDIO, sampleTime, samples, format); + _inboundEchoesCount = _inboundAudioDelays.size(); + _inboundEchoesSuppressedCount = _inboundEchoesSuppressed.size(); } void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) { + QMutexLocker locker(&_mutex); + _maxDelay = 0; _maxAttenuation = 0.0f; _minDelay = std::numeric_limits<float>::max(); @@ -292,14 +361,20 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons _totalAttenuation = 0.0f; _attenuationCount = 0; - QMutexLocker locker(&_mutex); - // depending on if we're processing local or external audio, pick the correct points vector QVector<AudiblePoint>& audiblePoints = source == INBOUND_AUDIO ? _inboundAudiblePoints : _localAudiblePoints; + int injectCalls = 0; + _injectedEchoes = 0; foreach(const AudiblePoint& audiblePoint, audiblePoints) { + injectCalls++; injectAudiblePoint(source, audiblePoint, samples, sampleTime, format.sampleRate()); } + + /* + qDebug() << "injectCalls=" << injectCalls; + qDebug() << "_injectedEchoes=" << _injectedEchoes; + */ _averageDelay = _delayCount == 0 ? 0 : _totalDelay / _delayCount; _averageAttenuation = _attenuationCount == 0 ? 0 : _totalAttenuation / _attenuationCount; @@ -308,6 +383,14 @@ void AudioReflector::echoAudio(AudioSource source, unsigned int sampleTime, cons _minDelay = 0.0f; _minAttenuation = 0.0f; } + + _maxDelayOfficial = _maxDelay; + _minDelayOfficial = _minDelay; + _maxAttenuationOfficial = _maxAttenuation; + _minAttenuationOfficial = _minAttenuation; + _averageDelayOfficial = _averageDelay; + _averageAttenuationOfficial = _averageAttenuation; + } void AudioReflector::drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color) { @@ -359,6 +442,19 @@ void AudioReflector::addAudioPath(AudioSource source, const glm::vec3& origin, c audioPaths.push_back(path); } +// NOTE: This is a prototype of an eventual utility that will identify the speaking sources for the inbound audio +// stream. It's not currently called but will be added soon. +void AudioReflector::identifyAudioSources() { + // looking for audio sources.... + foreach (const AvatarSharedPointer& avatarPointer, _avatarManager->getAvatarHash()) { + Avatar* avatar = static_cast<Avatar*>(avatarPointer.data()); + if (!avatar->isInitialized()) { + continue; + } + qDebug() << "avatar["<< avatar <<"] loudness:" << avatar->getAudioLoudness(); + } +} + void AudioReflector::calculateAllReflections() { // only recalculate when we've moved, or if the attributes have changed // TODO: what about case where new voxels are added in front of us??? 
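
For comparing the two distance attenuation curves outside the client, here is a minimal standalone JavaScript sketch (not part of the patch) of the math in getDistanceAttenuationCoefficient() above, using the constants from this diff; the scalingFactor parameter stands in for getDistanceAttenuationScalingFactor().

// Sketch only, not part of the patch: mirrors the "original formula" branch above.
function originalAttenuation(distance, scalingFactor) {
    var DISTANCE_SCALE = 2.5;
    var GEOMETRIC_AMPLITUDE_SCALAR = 0.3;
    var DISTANCE_LOG_BASE = 2.5;
    var DISTANCE_SCALE_LOG = Math.log(DISTANCE_SCALE) / Math.log(DISTANCE_LOG_BASE);
    var distanceSquareToSource = distance * distance;
    var coefficient = Math.pow(GEOMETRIC_AMPLITUDE_SCALAR,
        DISTANCE_SCALE_LOG + (0.5 * Math.log(distanceSquareToSource) / Math.log(DISTANCE_LOG_BASE)) - 1);
    return Math.min(1.0, coefficient * scalingFactor);
}

// Sketch only: mirrors the "alternate" branch, i.e. 300 / 2^(D/300), clamped to 1.0.
function alternateAttenuation(distance, scalingFactor) {
    var DISTANCE_BASE = 2.0;
    var DISTANCE_DENOMINATOR = 300.0;
    var DISTANCE_NUMERATOR = 300.0;
    var coefficient = DISTANCE_NUMERATOR / Math.pow(DISTANCE_BASE, distance / DISTANCE_DENOMINATOR);
    return Math.min(1.0, coefficient * scalingFactor);
}

For example, alternateAttenuation(3000, 1.0) works out to about 300 / 1024, roughly 0.29, and the clamp keeps the coefficient at 1.0 until roughly 2.5 km, so reflections keep contributing well past the 1500 m figure mentioned in the "From Fred" comment.
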
diff --git a/interface/src/AudioReflector.h b/interface/src/AudioReflector.h index 2408b70a96..cd5aaad276 100644 --- a/interface/src/AudioReflector.h +++ b/interface/src/AudioReflector.h @@ -15,6 +15,7 @@ #include "Audio.h" #include "avatar/MyAvatar.h" +#include "avatar/AvatarManager.h" enum AudioSource { LOCAL_AUDIO, @@ -69,25 +70,27 @@ public: void setVoxels(VoxelTree* voxels) { _voxels = voxels; } void setMyAvatar(MyAvatar* myAvatar) { _myAvatar = myAvatar; } void setAudio(Audio* audio) { _audio = audio; } + void setAvatarManager(AvatarManager* avatarManager) { _avatarManager = avatarManager; } void render(); /// must be called in the application render loop + void preProcessOriginalInboundAudio(unsigned int sampleTime, QByteArray& samples, const QAudioFormat& format); void processInboundAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); void processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); public slots: // statistics int getReflections() const { return _reflections; } - float getAverageDelayMsecs() const { return _averageDelay; } - float getAverageAttenuation() const { return _averageAttenuation; } - float getMaxDelayMsecs() const { return _maxDelay; } - float getMaxAttenuation() const { return _maxAttenuation; } - float getMinDelayMsecs() const { return _minDelay; } - float getMinAttenuation() const { return _minAttenuation; } + float getAverageDelayMsecs() const { return _averageDelayOfficial; } + float getAverageAttenuation() const { return _averageAttenuationOfficial; } + float getMaxDelayMsecs() const { return _maxDelayOfficial; } + float getMaxAttenuation() const { return _maxAttenuationOfficial; } + float getMinDelayMsecs() const { return _minDelayOfficial; } + float getMinAttenuation() const { return _minAttenuationOfficial; } float getDelayFromDistance(float distance); int getDiffusionPathCount() const { return _diffusionPathCount; } - int getEchoesInjected() const { return _inboundAudioDelays.size() + _localAudioDelays.size(); } - int getEchoesSuppressed() const { return _inboundEchoesSuppressed.size() + _localEchoesSuppressed.size(); } + int getEchoesInjected() const { return _inboundEchoesCount + _localEchoesCount; } + int getEchoesSuppressed() const { return _inboundEchoesSuppressedCount + _localEchoesSuppressedCount; } /// ms of delay added to all echos float getPreDelay() const { return _preDelay; } @@ -126,12 +129,19 @@ public slots: float getReflectiveRatio() const { return (1.0f - (_absorptionRatio + _diffusionRatio)); } void setReflectiveRatio(float ratio); + // wet/dry mix - these don't affect any reflection calculations, only the final mix volumes + float getOriginalSourceAttenuation() const { return _originalSourceAttenuation; } + void setOriginalSourceAttenuation(float value) { _originalSourceAttenuation = value; } + float getEchoesAttenuation() const { return _allEchoesAttenuation; } + void setEchoesAttenuation(float value) { _allEchoesAttenuation = value; } + signals: private: VoxelTree* _voxels; // used to access voxel scene MyAvatar* _myAvatar; // access to listener Audio* _audio; // access to audio API + AvatarManager* _avatarManager; // access to avatar manager API // Helpers for drawing void drawVector(const glm::vec3& start, const glm::vec3& end, const glm::vec3& color); @@ -147,11 +157,18 @@ private: float _averageDelay; float _maxDelay; float _minDelay; + float _averageDelayOfficial; + float _maxDelayOfficial; + float _minDelayOfficial; int _attenuationCount; float 
_totalAttenuation; float _averageAttenuation; float _maxAttenuation; float _minAttenuation; + float _averageAttenuationOfficial; + float _maxAttenuationOfficial; + float _minAttenuationOfficial; + glm::vec3 _listenerPosition; glm::vec3 _origin; @@ -161,11 +178,15 @@ QVector<AudiblePoint> _inboundAudiblePoints; /// the audible points that have been calculated from the inbound audio paths QMap<float, float> _inboundAudioDelays; /// delay times for currently injected audio points QVector<float> _inboundEchoesSuppressed; /// delay times for currently injected audio points + int _inboundEchoesCount; + int _inboundEchoesSuppressedCount; QVector<AudioPath*> _localAudioPaths; /// audio paths we're processing for local audio QVector<AudiblePoint> _localAudiblePoints; /// the audible points that have been calculated from the local audio paths QMap<float, float> _localAudioDelays; /// delay times for currently injected audio points QVector<float> _localEchoesSuppressed; /// delay times for currently injected audio points + int _localEchoesCount; + int _localEchoesSuppressedCount; // adds a sound source to begin an audio path trace, these can be the initial sound sources with their directional properties, // as well as diffusion sound sources @@ -182,6 +203,7 @@ private: void calculateAllReflections(); int countDiffusionPaths(); glm::vec3 getFaceNormal(BoxFace face); + void identifyAudioSources(); void injectAudiblePoint(AudioSource source, const AudiblePoint& audiblePoint, const QByteArray& samples, unsigned int sampleTime, int sampleRate); void echoAudio(AudioSource source, unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format); @@ -197,13 +219,16 @@ private: float _distanceAttenuationScalingFactor; float _localAudioAttenuationFactor; float _combFilterWindow; - int _diffusionFanout; // number of points of diffusion from each reflection point // all elements have the same material for now... 
float _absorptionRatio; float _diffusionRatio; float _reflectiveRatio; + + // wet/dry mix - these don't affect any reflection calculations, only the final mix volumes + float _originalSourceAttenuation; /// each sample of original signal will be multiplied by this + float _allEchoesAttenuation; /// each sample of all echo signals will be multiplied by this // remember the last known values at calculation bool haveAttributesChanged(); @@ -216,6 +241,10 @@ private: int _lastDiffusionFanout; float _lastAbsorptionRatio; float _lastDiffusionRatio; + bool _lastDontDistanceAttenuate; + bool _lastAlternateDistanceAttenuate; + + int _injectedEchoes; }; diff --git a/interface/src/Menu.cpp b/interface/src/Menu.cpp index 85c3d5b0c3..b05e5c91bc 100644 --- a/interface/src/Menu.cpp +++ b/interface/src/Menu.cpp @@ -429,6 +429,14 @@ Menu::Menu() : Qt::CTRL | Qt::SHIFT | Qt::Key_A, true); + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingDontDistanceAttenuate, + Qt::CTRL | Qt::SHIFT | Qt::Key_Y, + false); + + addCheckableActionToQMenuAndActionHash(spatialAudioMenu, MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate, + Qt::CTRL | Qt::SHIFT | Qt::Key_U, + false); + addActionToQMenuAndActionHash(developerMenu, MenuOption::PasteToVoxel, Qt::CTRL | Qt::SHIFT | Qt::Key_V, this, diff --git a/interface/src/Menu.h b/interface/src/Menu.h index a62f54b0c6..c17c9cc507 100644 --- a/interface/src/Menu.h +++ b/interface/src/Menu.h @@ -268,6 +268,10 @@ namespace MenuOption { const QString AudioSpatialProcessingSlightlyRandomSurfaces = "Slightly Random Surfaces"; const QString AudioSpatialProcessingStereoSource = "Stereo Source"; const QString AudioSpatialProcessingWithDiffusions = "With Diffusions"; + const QString AudioSpatialProcessingDontDistanceAttenuate = "Don't calculate distance attenuation"; + const QString AudioSpatialProcessingAlternateDistanceAttenuate = "Alternate distance attenuation"; + + const QString Avatars = "Avatars"; const QString Bandwidth = "Bandwidth Display"; diff --git a/interface/src/ui/Stats.cpp b/interface/src/ui/Stats.cpp index 64616cbdf8..a2d615e04d 100644 --- a/interface/src/ui/Stats.cpp +++ b/interface/src/ui/Stats.cpp @@ -343,7 +343,7 @@ void Stats::display( lines = _expanded ? 12 : 3; if (_expanded && Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessing)) { - lines += 8; // spatial audio processing adds 1 spacing line and 7 extra lines of info + lines += 9; // spatial audio processing adds 1 spacing line and 8 extra lines of info } drawBackground(backgroundColor, horizontalOffset, 0, glWidget->width() - horizontalOffset, lines * STATS_PELS_PER_LINE + 10); @@ -540,11 +540,19 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + + bool distanceAttenuationDisabled = Menu::getInstance()->isOptionChecked( + MenuOption::AudioSpatialProcessingDontDistanceAttenuate); - sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, Factor: %5.3f", + bool alternateDistanceAttenuationEnabled = Menu::getInstance()->isOptionChecked( + MenuOption::AudioSpatialProcessingAlternateDistanceAttenuate); + + sprintf(reflectionsStatus, "Attenuation: average %5.3f, max %5.3f, min %5.3f, %s: %5.3f", audioReflector->getAverageAttenuation(), audioReflector->getMaxAttenuation(), audioReflector->getMinAttenuation(), + (distanceAttenuationDisabled ? "Distance Factor [DISABLED]" : + alternateDistanceAttenuationEnabled ? 
"Distance Factor [ALTERNATE]" : "Distance Factor [STANARD]"), audioReflector->getDistanceAttenuationScalingFactor()); verticalOffset += STATS_PELS_PER_LINE; @@ -585,6 +593,13 @@ void Stats::display( verticalOffset += STATS_PELS_PER_LINE; drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + sprintf(reflectionsStatus, "Wet/Dry Mix: Original: %5.3f Echoes: %5.3f", + audioReflector->getOriginalSourceAttenuation(), + audioReflector->getEchoesAttenuation()); + + verticalOffset += STATS_PELS_PER_LINE; + drawText(horizontalOffset, verticalOffset, 0.10f, 0.f, 2.f, reflectionsStatus, color); + } }